source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_bitmap_AxB_saxpy_A_sparse_B_bitmap_template.c | //------------------------------------------------------------------------------
// GB_bitmap_AxB_saxpy_A_sparse_B_bitmap: C<#M>+=A*B, C bitmap, M any format
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C is bitmap or full. A is hyper/sparse, B is bitmap/full.
// if C is bitmap: no accumulator is used
// if C is full: C += A*B is computed with the accumulator identical to
// the monoid
{
if (use_coarse_tasks)
{
//----------------------------------------------------------------------
// C<#M> += A*B using coarse tasks
//----------------------------------------------------------------------
// number of columns in the workspace for each task
#define GB_PANEL_SIZE 4
if (B_iso)
{
// No special cases needed. GB_GETB handles the B iso case.
}
//----------------------------------------------------------------------
// allocate workspace for each task
//----------------------------------------------------------------------
GB_WERK_PUSH (H_slice, ntasks, int64_t) ;
if (H_slice == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
int64_t hwork = 0 ;
int tid ;
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend ;
GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
int64_t jtask = jend - jstart ;
int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
H_slice [tid] = hwork ;
#if ( !GB_C_IS_BITMAP )
// bitmap case always needs Hx workspace; full case only needs it
// if jpanel > 1
if (jpanel > 1)
#endif
{
hwork += jpanel ;
}
}
//----------------------------------------------------------------------
int64_t cvlenx = (GB_IS_ANY_PAIR_SEMIRING ? 0 : cvlen) * GB_CSIZE ;
#if GB_C_IS_BITMAP
Wf = GB_MALLOC_WORK (hwork * cvlen, int8_t, &Wf_size) ;
#endif
Wcx = GB_MALLOC_WORK (hwork * cvlenx, GB_void, &Wcx_size) ;
if ((GB_C_IS_BITMAP && Wf == NULL) || Wcx == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// C<#M> += A*B
//----------------------------------------------------------------------
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vectors of B and C for this coarse task
//------------------------------------------------------------------
int64_t jstart, jend ;
GB_PARTITION (jstart, jend, bvdim, tid, ntasks) ;
int64_t jtask = jend - jstart ;
int64_t jpanel = GB_IMIN (jtask, GB_PANEL_SIZE) ;
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
//------------------------------------------------------------------
// get the workspace for this task
//------------------------------------------------------------------
// Hf and Hx workspace to compute the panel of C
#if GB_C_IS_BITMAP
int8_t *restrict Hf = Wf + (H_slice [tid] * cvlen) ;
#endif
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + H_slice [tid] * cvlenx);
#endif
//------------------------------------------------------------------
// clear the panel
//------------------------------------------------------------------
#if GB_C_IS_BITMAP
memset (Hf, 0, jpanel * cvlen) ;
#endif
//------------------------------------------------------------------
// C<#M>(:,jstart:jend-1) += A * B(:,jstart:jend-1) by panel
//------------------------------------------------------------------
for (int64_t j1 = jstart ; j1 < jend ; j1 += jpanel)
{
//--------------------------------------------------------------
// get the panel of np vectors j1:j2-1
//--------------------------------------------------------------
int64_t j2 = GB_IMIN (jend, j1 + jpanel) ;
int64_t np = j2 - j1 ;
//--------------------------------------------------------------
// G = B(:,j1:j2-1), of size bvlen-by-np, in column major order
//--------------------------------------------------------------
int8_t *restrict Gb = (int8_t *) (Bb + (j1 * bvlen)) ;
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_BTYPE *restrict Gx = (GB_BTYPE *)
(((GB_void *) (B->x)) +
(B_iso ? 0 : ((j1 * bvlen) * GB_BSIZE))) ;
#endif
//--------------------------------------------------------------
// clear the panel H to compute C(:,j1:j2-1)
//--------------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
if (np == 1)
{
// Make H an alias to C(:,j1)
int64_t j = j1 ;
int64_t pC_start = j * cvlen ; // get pointer to C(:,j)
Hx = Cx + pC_start ;
}
else
{
// Hx = identity
int64_t nc = np * cvlen ;
#if GB_HAS_IDENTITY_BYTE
memset (Hx, GB_IDENTITY_BYTE, nc * GB_CSIZE) ;
#else
for (int64_t i = 0 ; i < nc ; i++)
{
Hx [i] = GB_IDENTITY ;
}
#endif
}
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//--------------------------------------------------------------
// H += A*G for one panel
//--------------------------------------------------------------
#undef GB_B_kj_PRESENT
#if GB_B_IS_BITMAP
#define GB_B_kj_PRESENT(b) b
#else
#define GB_B_kj_PRESENT(b) 1
#endif
#undef GB_MULT_A_ik_G_kj
#if GB_IS_PAIR_MULTIPLIER
// t = A(i,k) * B (k,j) is already #defined as 1
#define GB_MULT_A_ik_G_kj(gkj,jj)
#else
// t = A(i,k) * B (k,j)
#define GB_MULT_A_ik_G_kj(gkj,jj) \
GB_CIJ_DECLARE (t) ; \
GB_MULT (t, aik, gkj, i, k, j1 + jj)
#endif
#undef GB_HX_COMPUTE
#if GB_C_IS_BITMAP
#define GB_HX_COMPUTE(gkj,gb,jj) \
{ \
/* H (i,jj) += A(i,k) * B(k,j) */ \
if (GB_B_kj_PRESENT (gb)) \
{ \
/* t = A(i,k) * B (k,j) */ \
GB_MULT_A_ik_G_kj (gkj, jj) ; \
if (Hf [pH+jj] == 0) \
{ \
/* H(i,jj) is a new entry */ \
GB_HX_WRITE (pH+jj, t) ; /* Hx(i,jj)=t */ \
Hf [pH+jj] = 1 ; \
} \
else \
{ \
/* H(i,jj) is already present */ \
/* Hx(i,jj)+=t */ \
GB_HX_UPDATE (pH+jj, t) ; \
} \
} \
}
#else
#define GB_HX_COMPUTE(gkj,gb,jj) \
{ \
/* H (i,jj) += A(i,k) * B(k,j) */ \
if (GB_B_kj_PRESENT (gb)) \
{ \
/* t = A(i,k) * B (k,j) */ \
GB_MULT_A_ik_G_kj (gkj, jj) ; \
/* Hx(i,jj)+=t */ \
GB_HX_UPDATE (pH+jj, t) ; \
} \
}
#endif
switch (np)
{
case 4 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
const int8_t gb2 = Gb [k + 2*bvlen] ;
const int8_t gb3 = Gb [k + 3*bvlen] ;
if (!(gb0 || gb1 || gb2 || gb3)) continue ;
#endif
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
GB_GETB (gk2, Gx, k + 2*bvlen, B_iso) ;
GB_GETB (gk3, Gx, k + 3*bvlen, B_iso) ;
// H += A(:,k)*B(k,j1:j2-1)
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 4 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
GB_HX_COMPUTE (gk2, gb2, 2) ;
GB_HX_COMPUTE (gk3, gb3, 3) ;
}
}
break ;
case 3 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
const int8_t gb2 = Gb [k + 2*bvlen] ;
if (!(gb0 || gb1 || gb2)) continue ;
#endif
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
GB_GETB (gk2, Gx, k + 2*bvlen, B_iso) ;
// H += A(:,k)*B(k,j1:j2-1)
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 3 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
GB_HX_COMPUTE (gk2, gb2, 2) ;
}
}
break ;
case 2 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1)
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k ] ;
const int8_t gb1 = Gb [k + bvlen] ;
if (!(gb0 || gb1)) continue ;
#endif
// H += A(:,k)*B(k,j1:j2-1)
GB_GETB (gk0, Gx, k , B_iso) ;
GB_GETB (gk1, Gx, k + bvlen, B_iso) ;
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i * 2 ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, gb0, 0) ;
GB_HX_COMPUTE (gk1, gb1, 1) ;
}
}
break ;
case 1 :
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
// get A(:,k)
const int64_t k = GBH (Ah, kA) ;
// get B(k,j1:j2-1) where j1 == j2-1
#if GB_B_IS_BITMAP
const int8_t gb0 = Gb [k] ;
if (!gb0) continue ;
#endif
// H += A(:,k)*B(k,j1:j2-1)
GB_GETB (gk0, Gx, k, B_iso) ;
const int64_t pA_end = Ap [kA+1] ;
for (int64_t pA = Ap [kA] ; pA < pA_end ; pA++)
{
const int64_t i = Ai [pA] ;
const int64_t pH = i ;
GB_GETA (aik, Ax, pA, A_iso) ;
GB_HX_COMPUTE (gk0, 1, 0) ;
}
}
break ;
default:;
}
#undef GB_HX_COMPUTE
#undef GB_B_kj_PRESENT
#undef GB_MULT_A_ik_G_kj
//--------------------------------------------------------------
// C<#M>(:,j1:j2-1) = H
//--------------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
if (np == 1)
{
// Hx is already aliased to Cx; no more work to do
continue ;
}
#endif
for (int64_t jj = 0 ; jj < np ; jj++)
{
//----------------------------------------------------------
// C<#M>(:,j) = H (:,jj)
//----------------------------------------------------------
int64_t j = j1 + jj ;
int64_t pC_start = j * cvlen ; // get pointer to C(:,j)
for (int64_t i = 0 ; i < cvlen ; i++)
{
int64_t pC = pC_start + i ; // pointer to C(i,j)
int64_t pH = i * np + jj ; // pointer to H(i,jj)
#if GB_C_IS_BITMAP
if (!Hf [pH]) continue ;
Hf [pH] = 0 ; // clear the panel
int8_t cb = Cb [pC] ;
#endif
//------------------------------------------------------
// check M(i,j)
//------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
// M is sparse or hypersparse
bool mij = ((cb & 2) != 0) ^ Mask_comp ;
if (!mij) continue ;
cb = (cb & 1) ;
#elif GB_MASK_IS_BITMAP_OR_FULL
// M is bitmap or full
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
#endif
//------------------------------------------------------
// C(i,j) += H(i,jj)
//------------------------------------------------------
#if GB_C_IS_BITMAP
if (cb == 0)
{
// C(i,j) = H(i,jj)
GB_CIJ_GATHER (pC, pH) ;
Cb [pC] = keep ;
task_cnvals++ ;
}
else
{
// Currently, the matrix C is a newly allocated
// matrix, not the C_in input matrix to GrB_mxm.
// As a result, this condition is not used. It
// will be in the future when this method is
// modified to modify C in-place.
ASSERT (GB_DEAD_CODE) ;
// C(i,j) += H(i,jj)
GB_CIJ_GATHER_UPDATE (pC, pH) ;
}
#else
{
// C(i,j) = H(i,jj)
GB_CIJ_GATHER_UPDATE (pC, pH) ;
}
#endif
}
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
#undef GB_PANEL_SIZE
}
else if (use_atomics)
{
//----------------------------------------------------------------------
// C<#M> += A*B using fine tasks and atomics
//----------------------------------------------------------------------
if (B_iso)
{
// No special cases needed. GB_GET_B_kj (bkj = B(k,j))
// handles the B iso case.
}
int tid ;
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vector of B and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(:,j) and B(:,j). Its fine task
// id ranges from 0 to nfine_tasks_per_vector-1, and determines
// which slice of A to operate on.
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t kfirst = A_slice [fine_tid] ;
int64_t klast = A_slice [fine_tid + 1] ;
int64_t pB_start = j * bvlen ; // pointer to B(:,j)
int64_t pC_start = j * cvlen ; // pointer to C(:,j)
GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ*
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// for Hx Gustavason workspace: use C(:,j) in-place:
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *)
(((GB_void *) Cx) + (pC_start * GB_CSIZE)) ;
#endif
#if GB_IS_PLUS_FC32_MONOID || GB_IS_ANY_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID || GB_IS_ANY_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// C<#M>(:,j) += A(:,k1:k2) * B(k1:k2,j)
//------------------------------------------------------------------
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
//--------------------------------------------------------------
// C<#M>(:,j) += A(:,k) * B(k,j)
//--------------------------------------------------------------
int64_t k = GBH (Ah, kk) ; // k in range k1:k2
int64_t pB = pB_start + k ; // get pointer to B(k,j)
#if GB_B_IS_BITMAP
if (!GBB (Bb, pB)) continue ;
#endif
int64_t pA = Ap [kk] ;
int64_t pA_end = Ap [kk+1] ;
GB_GET_B_kj ; // bkj = B(k,j)
for ( ; pA < pA_end ; pA++)
{
//----------------------------------------------------------
// get A(i,k) and C(i,j)
//----------------------------------------------------------
int64_t i = Ai [pA] ; // get A(i,k) index
int64_t pC = pC_start + i ; // get C(i,j) pointer
//----------------------------------------------------------
// C<#M>(i,j) += A(i,k) * B(k,j)
//----------------------------------------------------------
#if ( !GB_C_IS_BITMAP )
{
//------------------------------------------------------
// C is full: the monoid is always atomic
//------------------------------------------------------
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
}
#elif GB_MASK_IS_SPARSE_OR_HYPER
{
//------------------------------------------------------
// M is sparse, and scattered into the C bitmap
//------------------------------------------------------
// finite-state machine in Cb [pC]:
// 0: cij not present, mij zero
// 1: cij present, mij zero (keep==1 for !M)
// 2: cij not present, mij one
// 3: cij present, mij one (keep==3 for M)
// 7: cij is locked
int8_t cb ;
#if GB_HAS_ATOMIC
{
// if C(i,j) is already present and can be modified
// (cb==keep), and the monoid can be done
// atomically, then do the atomic update. No need
// to modify Cb [pC].
GB_ATOMIC_READ
cb = Cb [pC] ; // grab the entry
if (cb == keep)
{
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
continue ; // C(i,j) has been updated
}
}
#endif
do // lock the entry
{
// do this atomically:
// { cb = Cb [pC] ; Cb [pC] = 7 ; }
GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
} while (cb == 7) ; // lock owner gets 0, 1, 2, or 3
if (cb == keep-1)
{
// C(i,j) is a new entry
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
task_cnvals++ ;
cb = keep ; // keep the entry
}
else if (cb == keep)
{
// C(i,j) is already present
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
}
GB_ATOMIC_WRITE
Cb [pC] = cb ; // unlock the entry
}
#else
{
//------------------------------------------------------
// M is not present, or bitmap/full
//------------------------------------------------------
// finite-state machine in Cb [pC]:
// 0: cij not present; can be written
// 1: cij present; can be updated
// 7: cij is locked
#if GB_MASK_IS_BITMAP_OR_FULL
{
// M is bitmap or full, and not in C bitmap.
// Do not modify C(i,j) if not permitted by the mask
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
}
#endif
//------------------------------------------------------
// C(i,j) += A(i,j) * B(k,j)
//------------------------------------------------------
int8_t cb ;
#if GB_HAS_ATOMIC
{
// if C(i,j) is already present (cb==1), and the
// monoid can be done atomically, then do the
// atomic update. No need to modify Cb [pC].
GB_ATOMIC_READ
cb = Cb [pC] ; // grab the entry
if (cb == 1)
{
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k) * B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
continue ; // C(i,j) has been updated
}
}
#endif
do // lock the entry
{
// do this atomically:
// { cb = Cb [pC] ; Cb [pC] = 7 ; }
GB_ATOMIC_CAPTURE_INT8 (cb, Cb [pC], 7) ;
} while (cb == 7) ; // lock owner gets 0 or 1
if (cb == 0)
{
// C(i,j) is a new entry
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_WRITE_HX (i, t) ; // C(i,j) = t
task_cnvals++ ;
}
else // cb == 1
{
// C(i,j) is already present
#if !GB_IS_ANY_MONOID
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
GB_ATOMIC_UPDATE_HX (i, t) ; // C(i,j) += t
#endif
}
GB_ATOMIC_WRITE
Cb [pC] = 1 ; // unlock the entry
}
#endif
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
}
else
{
//----------------------------------------------------------------------
// C<#M> += A*B using fine tasks and workspace, with no atomics
//----------------------------------------------------------------------
// Each fine task is given size-cvlen workspace to compute its result
// in the first phase, W(:,tid) = A(:,k1:k2) * B(k1:k2,j), where k1:k2
// is defined by the fine_tid of the task. The workspaces are then
// summed into C in the second phase.
if (B_iso)
{
// No special cases needed. GB_GET_B_kj (bkj = B(k,j))
// handles the B iso case.
}
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
size_t workspace = cvlen * ntasks ;
size_t cxsize = (GB_IS_ANY_PAIR_SEMIRING) ? 0 : GB_CSIZE ;
#if GB_C_IS_BITMAP
Wf = GB_MALLOC_WORK (workspace, int8_t, &Wf_size) ;
#endif
Wcx = GB_MALLOC_WORK (workspace * cxsize, GB_void, &Wcx_size) ;
if ((GB_C_IS_BITMAP && Wf == NULL) || Wcx == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// first phase: W (:,tid) = A (:,k1:k2) * B (k2:k2,j) for each fine task
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the vector of B and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(:,j) and B(:,j). Its fine task
// id ranges from 0 to nfine_tasks_per_vector-1, and determines
// which slice of A to operate on.
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t kfirst = A_slice [fine_tid] ;
int64_t klast = A_slice [fine_tid + 1] ;
int64_t pB_start = j * bvlen ; // pointer to B(:,j)
int64_t pC_start = j * cvlen ; // pointer to C(:,j), for bitmap
int64_t pW_start = tid * cvlen ; // pointer to W(:,tid)
GB_GET_T_FOR_SECONDJ ; // t = j or j+1 for SECONDJ*
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// for Hf and Hx Gustavason workspace: use W(:,tid):
#if GB_C_IS_BITMAP
int8_t *restrict Hf = Wf + pW_start ;
#endif
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = (GB_CTYPE *) (Wcx + (pW_start * cxsize)) ;
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// clear the panel
//------------------------------------------------------------------
#if GB_C_IS_BITMAP
{
memset (Hf, 0, cvlen) ;
}
#else
{
// set Hx to identity
#if GB_HAS_IDENTITY_BYTE
memset (Hx, GB_IDENTITY_BYTE, cvlen * GB_CSIZE) ;
#else
for (int64_t i = 0 ; i < cvlen ; i++)
{
Hx [i] = GB_IDENTITY ;
}
#endif
}
#endif
//------------------------------------------------------------------
// W<#M> = A(:,k1:k2) * B(k1:k2,j)
//------------------------------------------------------------------
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
//--------------------------------------------------------------
// W<#M>(:,tid) += A(:,k) * B(k,j)
//--------------------------------------------------------------
int64_t k = GBH (Ah, kk) ; // k in range k1:k2
int64_t pB = pB_start + k ; // get pointer to B(k,j)
#if GB_B_IS_BITMAP
if (!GBB (Bb, pB)) continue ;
#endif
int64_t pA = Ap [kk] ;
int64_t pA_end = Ap [kk+1] ;
GB_GET_B_kj ; // bkj = B(k,j)
for ( ; pA < pA_end ; pA++)
{
//----------------------------------------------------------
// get A(i,k)
//----------------------------------------------------------
int64_t i = Ai [pA] ; // get A(i,k) index
//----------------------------------------------------------
// check M(i,j)
//----------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
{
// M is sparse or hypersparse
int64_t pC = pC_start + i ;
int8_t cb = Cb [pC] ;
bool mij = ((cb & 2) != 0) ^ Mask_comp ;
if (!mij) continue ;
}
#elif GB_MASK_IS_BITMAP_OR_FULL
{
// M is bitmap or full
int64_t pC = pC_start + i ;
GB_GET_M_ij (pC) ;
mij = mij ^ Mask_comp ;
if (!mij) continue ;
}
#endif
//----------------------------------------------------------
// W<#M>(i) += A(i,k) * B(k,j)
//----------------------------------------------------------
#if GB_IS_ANY_PAIR_SEMIRING
{
Hf [i] = 1 ;
}
#else
{
GB_MULT_A_ik_B_kj ; // t = A(i,k)*B(k,j)
#if GB_C_IS_BITMAP
if (Hf [i] == 0)
{
// W(i) is a new entry
GB_HX_WRITE (i, t) ; // Hx(i) = t
Hf [i] = 1 ;
}
else
#endif
{
// W(i) is already present
GB_HX_UPDATE (i, t) ; // Hx(i) += t
}
}
#endif
}
}
}
//----------------------------------------------------------------------
// second phase: C<#M> += reduce (W)
//----------------------------------------------------------------------
#if GB_C_IS_BITMAP
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:cnvals)
#else
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
#endif
for (tid = 0 ; tid < ntasks ; tid++)
{
//------------------------------------------------------------------
// determine the W and C for this fine task
//------------------------------------------------------------------
// The fine task operates on C(i1:i2,j) and W(i1:i2,w1:w2), where
// i1:i2 is defined by the fine task id. Its fine task id ranges
// from 0 to nfine_tasks_per_vector-1.
// w1:w2 are the updates to C(:,j), where w1:w2 =
// [j*nfine_tasks_per_vector : (j+1)*nfine_tasks_per_vector-1].
int64_t j = tid / nfine_tasks_per_vector ;
int fine_tid = tid % nfine_tasks_per_vector ;
int64_t istart, iend ;
GB_PARTITION (istart, iend, cvlen, fine_tid,
nfine_tasks_per_vector) ;
int64_t pC_start = j * cvlen ; // pointer to C(:,j)
int64_t wstart = j * nfine_tasks_per_vector ;
int64_t wend = (j + 1) * nfine_tasks_per_vector ;
#if GB_C_IS_BITMAP
int64_t task_cnvals = 0 ;
#endif
// Hx = (typecasted) Wcx workspace, use Wf as-is
#if ( !GB_IS_ANY_PAIR_SEMIRING )
GB_CTYPE *restrict Hx = ((GB_CTYPE *) Wcx) ;
#endif
#if GB_IS_PLUS_FC32_MONOID
float *restrict Hx_real = (float *) Hx ;
float *restrict Hx_imag = Hx_real + 1 ;
#elif GB_IS_PLUS_FC64_MONOID
double *restrict Hx_real = (double *) Hx ;
double *restrict Hx_imag = Hx_real + 1 ;
#endif
//------------------------------------------------------------------
// C<#M>(i1:i2,j) += reduce (W (i2:i2, wstart:wend))
//------------------------------------------------------------------
for (int64_t w = wstart ; w < wend ; w++)
{
//--------------------------------------------------------------
// C<#M>(i1:i2,j) += W (i1:i2,w)
//--------------------------------------------------------------
int64_t pW_start = w * cvlen ; // pointer to W (:,w)
for (int64_t i = istart ; i < iend ; i++)
{
//----------------------------------------------------------
// get pointer and bitmap C(i,j) and W(i,w)
//----------------------------------------------------------
int64_t pW = pW_start + i ; // pointer to W(i,w)
#if GB_C_IS_BITMAP
if (Wf [pW] == 0) continue ; // skip if not present
#endif
int64_t pC = pC_start + i ; // pointer to C(i,j)
#if GB_C_IS_BITMAP
int8_t cb = Cb [pC] ; // bitmap status of C(i,j)
#endif
//----------------------------------------------------------
// M(i,j) already checked, but adjust Cb if M is sparse
//----------------------------------------------------------
#if GB_MASK_IS_SPARSE_OR_HYPER
{
// M is sparse or hypersparse
cb = (cb & 1) ;
}
#endif
//----------------------------------------------------------
// C(i,j) += W (i,w)
//----------------------------------------------------------
#if GB_C_IS_BITMAP
if (cb == 0)
{
// C(i,j) = W(i,w)
GB_CIJ_GATHER (pC, pW) ;
Cb [pC] = keep ;
task_cnvals++ ;
}
else
#endif
{
// C(i,j) += W(i,w)
GB_CIJ_GATHER_UPDATE (pC, pW) ;
}
}
}
#if GB_C_IS_BITMAP
cnvals += task_cnvals ;
#endif
}
}
}
|
uni_dir_ctx.h | /*
* Copyright (c) 2018 Intel Corporation. All rights reserved.
* This software is available to you under the BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* Streaming (uni-directional) put-bandwidth benchmark over multi-threaded
 * OpenSHMEM contexts.
 *
 * len            : message size in bytes for each putmem
 * metric_info    : benchmark configuration and src/dest buffers
 * streaming_node : nonzero on the PE that issues the puts (the sender);
 *                  the partner PE only participates in the barriers
 *
 * Each OpenMP thread creates a private SHMEM context and streams
 * window_size puts per iteration into a thread-private slice of the
 * target buffer (offset thread_id * len), calling shmem_ctx_quiet()
 * once per window. */
static inline
void uni_bw_ctx(int len, perf_metrics_t *metric_info, int streaming_node)
{
double start = 0.0, end = 0.0;
unsigned long int i, j;
int dest = partner_node(metric_info);
/* one-time sanity checks, shared across all message sizes */
static int check_once = 0;
if (!check_once) {
/* check to see whether sender and receiver are the same process */
if (dest == metric_info->my_node) {
fprintf(stderr, "Warning: Sender and receiver are the same "
"process (%d)\n", dest);
}
/* hostname validation for all sender and receiver processes */
int status = check_hostname_validation(metric_info);
if (status != 0) return;
check_once++;
}
shmem_barrier_all();
/* ---- warmup phase: untimed puts to prime connections/caches ---- */
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
/* Exit with success to avoid test failures in automated testing */
shmem_global_exit(0);
}
for (i = 0; i < metric_info->warmup; i++) {
for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
/* wait for completion of this window before issuing the next */
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
/* ---- timed phase: same traffic pattern, measured ---- */
if (streaming_node) {
#pragma omp parallel default(none) firstprivate(len, dest) private(i, j) \
shared(metric_info, start, end) num_threads(metric_info->nthreads)
{
const int thread_id = omp_get_thread_num();
shmem_ctx_t ctx;
int err = shmem_ctx_create(SHMEM_CTX_PRIVATE, &ctx);
if (err) {
printf("PE %d, Thr. %d: Error, context creation failed\n",
metric_info->my_node, thread_id);
/* Exit with success to avoid test failures in automated testing */
shmem_global_exit(0);
}
/* start the clock only after every thread has its context, so
 * context-creation cost is excluded from the measurement */
#pragma omp barrier
#pragma omp master
{
start = perf_shmemx_wtime();
}
for (i = 0; i < metric_info->trials; i++) {
for (j = 0; j < metric_info->window_size; j++) {
#ifdef USE_NONBLOCKING_API
shmem_ctx_putmem_nbi(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#else
shmem_ctx_putmem(ctx, metric_info->dest + thread_id * len,
metric_info->src + thread_id * len, len, dest);
#endif
}
shmem_ctx_quiet(ctx);
}
shmem_ctx_destroy(ctx);
}
}
shmem_barrier_all();
/* 'end' is taken after the global barrier, i.e. after all threads have
 * quiesced and all PEs have synchronized, so it covers the whole run */
if (streaming_node) {
end = perf_shmemx_wtime();
calc_and_print_results(end, start, len, metric_info);
}
shmem_barrier_all();
}
|
nesting-1.c | void
f_acc_data (void)
{
#pragma acc data
{
int i;
#pragma omp atomic write
i = 0;
}
}
/* OpenACC 'kernels' region with an OpenMP 'atomic write' construct
 * nested inside it (same nesting-diagnostics pattern as f_acc_data). */
void
f_acc_kernels (void)
{
#pragma acc kernels
{
int i;
#pragma omp atomic write
i = 0;
}
}
/* OpenACC 'routine vector' function containing an 'acc loop' whose body
 * holds an OpenMP 'atomic write' — exercises OpenMP nested inside an
 * OpenACC loop construct. */
#pragma acc routine vector
void
f_acc_loop (void)
{
int i;
#pragma acc loop
for (i = 0; i < 2; ++i)
{
#pragma omp atomic write
i = 0;
}
}
/* OpenACC 'parallel' region with an OpenMP 'atomic write' construct
 * nested inside it (same nesting-diagnostics pattern as f_acc_data). */
void
f_acc_parallel (void)
{
#pragma acc parallel
{
int i;
#pragma omp atomic write
i = 0;
}
}
|
raytracing.h | #ifndef RAYTRACING_H
#define RAYTRACING_H
#include "scene.h"
#include <omp.h>
#define MAX_MANFANDEP 2
#define MAX_TOTALDEP 10
class raytracer
{
public:
scene nowscene;
// Direct (diffuse + highlight) shading at a hit point.
//
// rightobj : object that was hit
// info     : hit record (position wz, normal N)
// nowhash  : pixel hash accumulator; ids of unshadowed type-0 lights
//            that light this point are folded in (only when nonzero)
//
// Returns the ambient term (base color * scene background * pmanfan)
// plus, for every light with visibility >= EPS and N·L > EPS, a
// diffuse term (pmanfan * N·L) and a highlight term
// (pjingmianmanfan * (N·L)^20), each attenuated by the shadow factor.
color calmanfanshe(object* rightobj,zhuang info,int &nowhash)
{
    // base surface color: textured if the material has a background map
    color baseCol = rightobj->mt->background
        ? rightobj->mt->mcolor*rightobj->getcolor(info)
        : rightobj->mt->mcolor;
    // ambient contribution from the scene background
    color accum = baseCol*nowscene.background*rightobj->mt->pmanfan;
    for (light* lt = nowscene.nowlight; lt; lt = lt->next)
    {
        double vis = lt->calshade(info.wz, nowscene.nowobj,
                                  nowscene.nowcamera->shadecttime);
        if (vis < EPS)
            continue;                       // fully in shadow
        Vec3f toLight = lt->wz - info.wz;
        toLight.Normalize();
        double nDotL = toLight.dot(info.N);
        if (nDotL > EPS)
        {
            // fold the id of this (point, type==0) light into the hash
            if (nowhash && lt->type == 0)
            {
                nowhash = nowhash + lt->bianhao;
            }
            if (rightobj->mt->pmanfan > EPS)
            {
                double diffuse = rightobj->mt->pmanfan*nDotL*vis;
                accum = accum + baseCol*lt->nowcolor*diffuse;
            }
            if (rightobj->mt->pjingmianmanfan > EPS)
            {
                double highlight = rightobj->mt->pjingmianmanfan*pow(nDotL,20)*vis;
                accum = accum + baseCol*lt->nowcolor*highlight;
            }
        }
    }
    return accum;
}
// Mirror / glossy reflection.
//
// rightobj : object that was hit
// info     : hit record (position wz, normal N)
// inray    : incoming ray (by value; mutated into the reflected ray)
// dep      : current recursion depth
// nowhash  : pixel hash accumulator threaded through raytracing()
//
// Reflects the ray about the normal and traces it.  If the material has
// a glossy radius (rjingmianmanfan) and we are still shallow in the
// recursion, instead average jingmianmanfancttime*16 jittered reflection
// rays distributed over a disc perpendicular to the mirror direction.
color calfanshe(object* rightobj,zhuang info,ray inray,int dep,int &nowhash)
{
color nowcolor=rightobj->mt->mcolor;
inray.direct=inray.direct.reflect(info.N);
inray.start=info.wz;
inray.direct.Normalize();
// perfect mirror, or too deep for glossy sampling: one reflected ray
if (rightobj->mt->rjingmianmanfan<EPS || dep>MAX_MANFANDEP)
return raytracing(inray,dep+1,nowhash)*nowcolor*rightobj->mt->pfan;
// Build two axes spanning the plane perpendicular to the mirror
// direction (Vec3f operator* is used as the cross product here).
Vec3f dx(1,0,0);
dx=inray.direct*dx;
// BUGFIX: when direct is parallel to the x axis the cross product above
// is zero.  The old fallback dx=Vec3f(1,0,0) was itself parallel to
// direct, so dy = direct x dx below was zero too and the glossy jitter
// silently collapsed to a perfect mirror.  Cross with the y axis
// instead, which is guaranteed non-parallel in this degenerate case.
if (dx.iszero()) dx=inray.direct*Vec3f(0,1,0);
Vec3f dy=inray.direct*dx;
color ret(0,0,0);
int totaltime=nowscene.nowcamera->jingmianmanfancttime*16;
for (int i=0;i<totaltime;i++)
{
// rejection-sample (fx,fy) uniformly inside the unit disc,
// then scale by the material's glossy radius
double fx=1,fy=1;
while (sqr(fx)+sqr(fy)>1)
{
fx=rand_at(-1,1);
fy=rand_at(-1,1);
}
fx *= rightobj->mt->rjingmianmanfan;
fy *= rightobj->mt->rjingmianmanfan;
ray newray=inray;
newray.direct=newray.direct+dx*fx+dy*fy;
// per-sample hash is intentionally discarded: jittered samples must
// not perturb the pixel's edge-detection fingerprint.
// dep+MAX_MANFANDEP keeps secondary glossy bounces from recursing.
int newhash=0;
ret =ret+ raytracing(newray,dep+MAX_MANFANDEP,newhash);
}
ret = ret *nowcolor*rightobj->mt->pfan/totaltime;
return ret;
}
// Refraction (transmission) through a transparent material.
//
// rightobj : object that was hit
// info     : hit record (position wz, normal N, isfront, dist)
// inray    : incoming ray (by value; mutated into the refracted ray)
// dep      : current recursion depth
// nowhash  : pixel hash accumulator threaded through raytracing()
//
// Bends the ray by the material's index of refraction and traces it.
// When the ray exits the medium (back face), the transmitted color is
// additionally attenuated by exp(-acolor * distance traveled inside).
color calzheshe(object* rightobj,zhuang info,ray inray,int dep,int &nowhash)
{
    // entering the medium uses 1/n, leaving uses n
    double ratio = info.isfront ? 1.0/rightobj->mt->zheshelv
                                : rightobj->mt->zheshelv;
    inray.direct.Normalize();
    inray.direct = inray.direct.zheshe(info.N, ratio);
    inray.start = info.wz;
    color refracted = raytracing(inray, dep+1, nowhash)*rightobj->mt->pzhe;
    if (info.isfront)
    {
        // front face: no interior path traveled yet, no absorption
        return refracted;
    }
    // exponential absorption over the distance traveled in the medium
    color absorb = rightobj->mt->acolor*info.dist;
    absorb.r = exp(-absorb.r);
    absorb.g = exp(-absorb.g);
    absorb.b = exp(-absorb.b);
    return refracted*absorb;
}
// Trace one ray and return its color.
//
// nowray  : ray to trace
// dep     : recursion depth; rays deeper than MAX_TOTALDEP return black
// nowhash : accumulator fingerprinting which objects/lights this ray
//           path hit; run() compares neighboring pixels' hashes to
//           detect edges for supersampling
color raytracing(ray nowray,int dep,int &nowhash)
{
if (dep>MAX_TOTALDEP) return color(0,0,0);
color ret;
zhuang infoobj; double infolight;
//cout<<"rc1"<<endl;
// nearest object and nearest light along the ray, with hit distances
object* nearobj=nowscene.findnearobj(nowray,infoobj);
light* nearlight=nowscene.findnearlight(nowray,infolight);
// a light hit in front of any object contributes its color directly
if (nearlight && (!nearobj || infolight<infoobj.dist))
{
nowhash=(nowhash+nearlight->bianhao) ;
ret = ret+nearlight->nowcolor;
}
if (nearobj)
{
nowhash=(nowhash+nearobj->bianhao) ;
// shade each enabled material component: diffuse/highlight,
// reflection, refraction
if (nearobj->mt->pmanfan>EPS || nearobj->mt->pjingmianmanfan>EPS)
ret = ret+calmanfanshe(nearobj,infoobj,nowhash);
if (nearobj->mt->pfan>EPS)
ret = ret+calfanshe(nearobj,infoobj,nowray,dep,nowhash);
if (nearobj->mt->pzhe>EPS)
ret = ret+calzheshe(nearobj,infoobj,nowray,dep,nowhash);
}
// ray escaped the scene entirely: background color
if (!nearlight && !nearobj)
ret=nowscene.background;
// mix depth into the hash so different bounce structures differ
nowhash=nowhash*5;
ret.Repaircolor();
//cout<<"rc2"<<endl;
return ret;
}
void run()
{
    // Render the scene in two passes:
    //  1. Trace one primary ray per pixel (or focustime jittered rays when
    //     depth-of-field is enabled), storing the per-pixel hash of every
    //     object/light the ray tree touched.
    //  2. Where a pixel's hash differs from any 4-neighbour (a geometric or
    //     shading edge), re-trace a 3x3 sub-pixel grid and average —
    //     adaptive anti-aliasing on edge pixels only.
    // Fixes: removed the leftover `cout << omp_get_num_threads()` debug
    // print (unsynchronized cout writes from inside the parallel region)
    // and the unused local `js`.
    int H=nowscene.nowcamera->height;
    int W=nowscene.nowcamera->width;
    int *hashzhi = new int[H*W];
    for (int i=0;i<H*W;i++) hashzhi[i]=0;
#pragma omp parallel for schedule(dynamic)
    for (int i=0;i<H;i++)
    {
        safeprint(i,H); // progress report (assumed thread-safe; TODO confirm)
        Vec3f csfx;
        ray csray;
        color thiscolor;
        for (int j=0;j<W;j++)
        {
            if (!nowscene.nowcamera->focuson)
            {
                // Pinhole camera: one ray through the pixel center.
                csfx=nowscene.nowcamera->shechu(i,j);
                csray.start=nowscene.nowcamera->wz;
                csray.direct=csfx;
                csray.direct.Normalize();
                thiscolor=raytracing(csray,1,hashzhi[i*W+j]);
                nowscene.nowcamera->nowimage->drawpixel(i,j,thiscolor);
            }
            else
            {
                // Depth of field: average focustime lens-jittered rays.
                thiscolor=color(0,0,0);
                for (int is=0;is<nowscene.nowcamera->focustime;is++)
                {
                    csray=nowscene.nowcamera->focusshechu(i,j);
                    thiscolor=thiscolor+raytracing(csray,1,hashzhi[i*W+j]);
                }
                thiscolor = thiscolor / nowscene.nowcamera->focustime;
                nowscene.nowcamera->nowimage->drawpixel(i,j,thiscolor);
            }
        }
    }
    // Pass 2: anti-alias edge pixels (pinhole mode only; DoF already jitters).
#pragma omp parallel for schedule(dynamic)
    for (int i=0;i<H;i++)
        if (!nowscene.nowcamera->focuson)
        {
            safeprint(i,H);
            for (int j=0;j<W;j++)
            {
                bool havejuchi=false; // "juchi" = jaggies: hash mismatch with a neighbour
                ray csray;
                Vec3f csfx;
                int nowhash;
                double newi,newj;
                color thiscolor(0,0,0);
                if (j!=W-1 && hashzhi[i*W+j]!=hashzhi[i*W+(j+1)]) havejuchi=true;
                if (j!=0 && hashzhi[i*W+j]!=hashzhi[i*W+(j-1)]) havejuchi=true;
                if (i!=H-1 && hashzhi[i*W+j]!=hashzhi[(i+1)*W+j]) havejuchi=true;
                if (i!=0 && hashzhi[i*W+j]!=hashzhi[(i-1)*W+j]) havejuchi=true;
                if (havejuchi)
                {
                    // 3x3 grid of sub-pixel offsets in {-0.125, 0, +0.125};
                    // each of the 9 samples contributes 1/9 of the pixel.
                    for (double ix=-0.25;ix<=0.25;ix=ix+0.25)
                        for (double iy=-0.25;iy<=0.25;iy=iy+0.25)
                        {
                            newi=i+ix/2; newj=j+iy/2;
                            nowhash=0;
                            csfx=nowscene.nowcamera->shechu(newi,newj);
                            csray.start=nowscene.nowcamera->wz;
                            csray.direct=csfx;
                            csray.direct.Normalize();
                            thiscolor=thiscolor+raytracing(csray,1,nowhash)/9;
                        }
                    thiscolor.Repaircolor();
                    nowscene.nowcamera->nowimage->drawpixel(i,j,thiscolor);
                }
            }
        }
    delete[]hashzhi;
}
};
#endif // RAYTRACING_H
/* $Header$ */
/* Purpose: Variable arithmetic */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License with exceptions described in the LICENSE file */
#include "nco_var_rth.h" /* Variable arithmetic */
void
nco_var_abs /* [fnc] Replace op1 values by their absolute values */
(const nc_type type, /* I [enm] netCDF type of operand */
 const long sz, /* I [nbr] Size (in elements) of operand */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [val] Value of missing value */
 ptr_unn op1) /* I/O [val] Values of first operand */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Replace op1 values by their absolute values, in place.
     Elements equal to the missing value (when has_mss_val) are left untouched.
     Unsigned, character, and string types are no-ops. */
  /* Absolute value is currently defined as op1:=abs(op1) */
  /* NB: Many compilers need to #include "nco_rth_flt.h" for fabsf() prototype */
  /* Fix: NC_BYTE case previously fell through into NC_UBYTE; the fall-through
     was harmless only because NC_UBYTE immediately breaks. Explicit break added. */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op1.fp[idx]=fabsf(op1.fp[idx]);
    }else{
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.fp[idx] != mss_val_flt) op1.fp[idx]=fabsf(op1.fp[idx]);
      } /* end for */
    } /* end else */
    break;
  case NC_DOUBLE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op1.dp[idx]=fabs(op1.dp[idx]);
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.dp[idx] != mss_val_dbl) op1.dp[idx]=fabs(op1.dp[idx]);
      } /* end for */
    } /* end else */
    break;
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op1.ip[idx]=labs(op1.ip[idx]); /* int abs(int), long labs(long) */
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.ip[idx] != mss_val_ntg) op1.ip[idx]=labs(op1.ip[idx]); /* int abs(int), long labs(long) */
      } /* end for */
    } /* end else */
    break;
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op1.sp[idx] < 0) op1.sp[idx]=-op1.sp[idx] ;
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.sp[idx] != mss_val_short && op1.sp[idx] < 0) op1.sp[idx]=-op1.sp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op1.bp[idx] < 0) op1.bp[idx]=-op1.bp[idx] ;
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.bp[idx] != mss_val_byte && op1.bp[idx] < 0) op1.bp[idx]=-op1.bp[idx];
      } /* end for */
    } /* end else */
    break; /* was missing: fell through to NC_UBYTE */
  case NC_UBYTE: break; /* Do nothing */
  case NC_USHORT: break; /* Do nothing */
  case NC_UINT: break; /* Do nothing */
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op1.i64p[idx]=llabs(op1.i64p[idx]);
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op1.i64p[idx] != mss_val_int64) op1.i64p[idx]=llabs(op1.i64p[idx]);
      } /* end for */
    } /* end else */
    break;
  case NC_UINT64: break; /* Do nothing */
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_abs() */
void
nco_var_add /* [fnc] Add first operand to second operand */
(const nc_type type, /* I [enm] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand on input, values of sum on output */
{
  /* Purpose: Add value of first operand to value of second operand
     and store result in second operand.
     Assume operands conform, are same type, and are in memory
     nco_var_add() does _not_ increment tally counter
     nco_var_add_tll_ncra() does increment tally counter */
  /* Addition is currently defined as op2:=op1+op2 where op1 != mss_val and op2 != mss_val;
     otherwise op2:=mss_val. Character and string types are no-ops. */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  /* One expansion per numeric type: mbr is the ptr_unn member name, typ the C type.
     Identical semantics to the former hand-copied per-type case bodies. */
#define NCO_VAR_ADD_CASE(mbr,typ) \
  if(!has_mss_val){ \
    for(idx=0;idx<sz;idx++) op2.mbr[idx]+=op1.mbr[idx]; \
  }else{ \
    const typ mss=*mss_val.mbr; \
    for(idx=0;idx<sz;idx++){ \
      if(op2.mbr[idx] != mss && op1.mbr[idx] != mss) op2.mbr[idx]+=op1.mbr[idx]; else op2.mbr[idx]=mss; \
    } \
  }
  switch(type){
  case NC_FLOAT: NCO_VAR_ADD_CASE(fp,float); break;
  case NC_DOUBLE: NCO_VAR_ADD_CASE(dp,double); break;
  case NC_INT: NCO_VAR_ADD_CASE(ip,nco_int); break;
  case NC_SHORT: NCO_VAR_ADD_CASE(sp,nco_short); break;
  case NC_USHORT: NCO_VAR_ADD_CASE(usp,nco_ushort); break;
  case NC_UINT: NCO_VAR_ADD_CASE(uip,nco_uint); break;
  case NC_INT64: NCO_VAR_ADD_CASE(i64p,nco_int64); break;
  case NC_UINT64: NCO_VAR_ADD_CASE(ui64p,nco_uint64); break;
  case NC_BYTE: NCO_VAR_ADD_CASE(bp,nco_byte); break;
  case NC_UBYTE: NCO_VAR_ADD_CASE(ubp,nco_ubyte); break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
#undef NCO_VAR_ADD_CASE
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_add() */
void
nco_var_add_tll_ncflint /* [fnc] Add first operand to second operand, increment tally */
(const nc_type type, /* I [enm] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 long * restrict const tally, /* I/O [nbr] Counter space */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand on input, values of sum on output */
{
  /* Purpose: Add value of first operand to value of second operand
     and store result in second operand.
     Assume operands conform, are same type, and are in memory
     nco_var_add() does _not_ increment tally counter
     nco_var_add_tll_ncflint() does increment tally counter */
  /* Addition is currently defined as op2:=op1+op2 where op1 != mss_val and op2 != mss_val */
  /* Return missing_value where either or both input values are missing
     Algorithm used since 20040603
     NB: Tally is incremented but not used
     The pre-20040603 variant (tally tracked but result not set to missing)
     lived here inside "#if 0" for years; it has been removed — retrieve it
     from version control if ever needed. */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  /* One expansion per numeric type: mbr is the ptr_unn member name, typ the C type */
#define NCO_ADD_TLL_NCFLINT_CASE(mbr,typ) \
  if(!has_mss_val){ \
    for(idx=0;idx<sz;idx++){ \
      op2.mbr[idx]+=op1.mbr[idx]; \
      tally[idx]++; \
    } \
  }else{ \
    const typ mss=*mss_val.mbr; \
    for(idx=0;idx<sz;idx++){ \
      if(op2.mbr[idx] != mss && op1.mbr[idx] != mss){ \
        op2.mbr[idx]+=op1.mbr[idx]; \
        tally[idx]++; \
      }else{ \
        op2.mbr[idx]=mss; \
      } \
    } \
  }
  switch(type){
  case NC_FLOAT: NCO_ADD_TLL_NCFLINT_CASE(fp,float); break;
  case NC_DOUBLE: NCO_ADD_TLL_NCFLINT_CASE(dp,double); break;
  case NC_INT: NCO_ADD_TLL_NCFLINT_CASE(ip,nco_int); break;
  case NC_SHORT: NCO_ADD_TLL_NCFLINT_CASE(sp,nco_short); break;
  case NC_USHORT: NCO_ADD_TLL_NCFLINT_CASE(usp,nco_ushort); break;
  case NC_UINT: NCO_ADD_TLL_NCFLINT_CASE(uip,nco_uint); break;
  case NC_INT64: NCO_ADD_TLL_NCFLINT_CASE(i64p,nco_int64); break;
  case NC_UINT64: NCO_ADD_TLL_NCFLINT_CASE(ui64p,nco_uint64); break;
  case NC_BYTE: NCO_ADD_TLL_NCFLINT_CASE(bp,nco_byte); break;
  case NC_UBYTE: NCO_ADD_TLL_NCFLINT_CASE(ubp,nco_ubyte); break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
#undef NCO_ADD_TLL_NCFLINT_CASE
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_add_tll_ncflint() */
void
nco_var_add_tll_ncra /* [fnc] Add first operand to second operand, increment tally */
(const nc_type type, /* I [enm] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 long * restrict const tally, /* I/O [nbr] Counter space */
 const double wgt_crr, /* I [frc] Weight of current record (ncra/ncea only) */
 double * restrict const wgt_sum, /* I/O [frc] Running sum of per-file weights (ncra/ncea only) */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand (running sum) on input, values of new sum on output */
{
  /* Purpose: Add value of first operand to value of second operand
     and store result in second operand.
     Assume operands conform, are same type, and are in memory
     nco_var_add() does _not_ increment tally counter.
     nco_var_add_tll_ncflint() adds if neither operand equals missing value
     nco_var_add_tll_ncflint() does increment tally counter (unlike nco_var_add())
     nco_var_add_tll_ncra() adds if op1 does not equal missing value
     nco_var_add_tll_ncra() does increment tally counter (like nco_var_add_tll_ncflint())
     nco_var_add_tll_ncra() is designed to:
     1. Work for "running average" algorithms only
     2. Assume running sum is valid and is stored in op2
     3. Assume new record is stored in op1
     4. Check only if new record (not running sum) equals missing_value
        Note that missing_value is associated with op1, i.e., new record, not running sum
     5. Accumulate running sum only if new record is valid
     6. Increment tally
     Difference between nco_var_add_tll_ncra() and nco_var_add_tll_ncflint() is that
     nco_var_add_tll_ncflint() checks both operands against the missing_value, whereas
     nco_var_add_tll_ncra() only checks first operand (new record) against missing_value
     nco_var_add_tll_ncflint() algorithm fails as running average algorithm when
     missing value is zero because running sum is bootstrapped to zero and this
     causes comparison to missing_value to always be true.
     nco_var_add_tll_ncflint() also fails as running average algorithm whenever
     running sum happens to equal missing_value (regardless if missing value is zero).
     NCO uses nco_var_add_tll_ncflint() only for ncflint
     NCO uses nco_var_add_tll_ncra() only for ncra/nces */
  /* Addition is currently defined as op2:=op1+op2 where op1 != mss_val */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  /* One expansion per numeric type: mbr is the ptr_unn member name, typ the C type.
     Only the missing-value branch tests op1 and accumulates wgt_sum. */
#define NCO_ADD_TLL_NCRA_CASE(mbr,typ) \
  if(!has_mss_val){ \
    for(idx=0;idx<sz;idx++){ \
      op2.mbr[idx]+=op1.mbr[idx]; \
      tally[idx]++; \
    } \
  }else{ \
    const typ mss=*mss_val.mbr; \
    for(idx=0;idx<sz;idx++){ \
      if(op1.mbr[idx] != mss){ \
        op2.mbr[idx]+=op1.mbr[idx]; \
        if(wgt_sum) wgt_sum[idx]+=wgt_crr; \
        tally[idx]++; \
      } \
    } \
  }
  switch(type){
  case NC_FLOAT: NCO_ADD_TLL_NCRA_CASE(fp,float); break;
  case NC_DOUBLE: NCO_ADD_TLL_NCRA_CASE(dp,double); break;
  case NC_INT: NCO_ADD_TLL_NCRA_CASE(ip,nco_int); break;
  case NC_SHORT: NCO_ADD_TLL_NCRA_CASE(sp,nco_short); break;
  case NC_USHORT: NCO_ADD_TLL_NCRA_CASE(usp,nco_ushort); break;
  case NC_UINT: NCO_ADD_TLL_NCRA_CASE(uip,nco_uint); break;
  case NC_INT64: NCO_ADD_TLL_NCRA_CASE(i64p,nco_int64); break;
  case NC_UINT64: NCO_ADD_TLL_NCRA_CASE(ui64p,nco_uint64); break;
  case NC_BYTE: NCO_ADD_TLL_NCRA_CASE(bp,nco_byte); break;
  case NC_UBYTE: NCO_ADD_TLL_NCRA_CASE(ubp,nco_ubyte); break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
#undef NCO_ADD_TLL_NCRA_CASE
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_add_tll_ncra() */
void
nco_var_copy_tll /* [fnc] Copy hyperslab variables of type var_typ from op1 to op2, accounting for missing values in tally */
(const nc_type type, /* I [enm] netCDF type */
 const long sz, /* I [nbr] Number of elements to copy */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [val] Value of missing value */
 long * restrict const tally, /* O [nbr] Counter space */
 const ptr_unn op1, /* I [sct] Values to copy */
 ptr_unn op2) /* O [sct] Destination to copy values to */
{
  /* Purpose: Copy hyperslab variables of type var_typ from op1 to op2
     Assumes memory area in op2 has already been malloc()'d
     Where the value copied is not equal to the missing value, set the tally to one
     nco_var_copy(): Does nothing with missing values and tallies
     nco_var_copy_tll(): Accounts for missing values in tally */
  /* Algorithm is currently defined as: op2:=op1 */
  long idx;
  /* Use fast nco_var_copy() method to copy variable */
  (void)memcpy((void *)(op2.vp),(void *)(op1.vp),sz*nco_typ_lng(type));
  if(!has_mss_val){
    /* Tally is one everywhere when no missing value is defined: nothing else to do */
    (void)nco_set_long(sz,1L,tally);
    return;
  } /* !has_mss_val */
  /* Tally remains zero until verified (below) that datum is not missing value */
  (void)nco_set_long(sz,0L,tally);
  /* Typecast pointer to values before access
     (has_mss_val is guaranteed true here by the early return above) */
  (void)cast_void_nctype(type,&op2);
  (void)cast_void_nctype(type,&mss_val);
  /* Zero copied values that equal the missing value, set tally to one elsewhere,
     so subsequent accumulation can treat op2 as a plain additive operand */
  switch(type){
  case NC_FLOAT:
    {
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] == mss_val_flt) op2.fp[idx]=0.0f; else tally[idx]=1L;
    }
    break;
  case NC_DOUBLE:
    {
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] == mss_val_dbl) op2.dp[idx]=0.0; else tally[idx]=1L;
    }
    break;
  case NC_INT:
    {
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] == mss_val_ntg) op2.ip[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_SHORT:
    {
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] == mss_val_short) op2.sp[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_USHORT:
    {
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] == mss_val_ushort) op2.usp[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_UINT:
    {
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] == mss_val_uint) op2.uip[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_INT64:
    {
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] == mss_val_int64) op2.i64p[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_UINT64:
    {
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] == mss_val_uint64) op2.ui64p[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_BYTE:
    {
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] == mss_val_byte) op2.bp[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_UBYTE:
    {
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] == mss_val_ubyte) op2.ubp[idx]=0; else tally[idx]=1L;
    }
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not necessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_copy_tll() */
void
nco_var_dvd /* [fnc] Divide second operand by first operand */
(const nc_type type, /* I [type] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of denominator */
 ptr_unn op2) /* I/O [val] Values of numerator on input, values of quotient on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Divide value of first operand by value of second operand
     and store result in second operand.
     Assume operands conform, are same type, and are in memory */
  /* Variable-variable division is currently defined as op2:=op2/op1 */
  /* Missing-value semantics: if either operand equals the missing value,
     the result element is set to the missing value */
  /* NOTE(review): denominator elements are not screened for zero---division by
     zero is undefined behavior for the integer types here and yields Inf/NaN
     for NC_FLOAT/NC_DOUBLE. Presumably callers guarantee non-zero denominators
     or accept IEEE results; confirm upstream. */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(!has_mss_val){
      /* No missing value defined: divide every element unconditionally */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.fp[idx]/=op1.fp[idx];
    }else{
      /* Hoist missing value into scalar so loop body vectorizes */
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.fp[idx] != mss_val_flt) && (op1.fp[idx] != mss_val_flt)) op2.fp[idx]/=op1.fp[idx]; else op2.fp[idx]=mss_val_flt;
      } /* end for */
    } /* end else */
    break; /* end NC_FLOAT */
  /* Remaining numeric types follow the identical pattern as NC_FLOAT above */
  case NC_DOUBLE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.dp[idx]/=op1.dp[idx];
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.dp[idx] != mss_val_dbl) && (op1.dp[idx] != mss_val_dbl)) op2.dp[idx]/=op1.dp[idx]; else op2.dp[idx]=mss_val_dbl;
      } /* end for */
    } /* end else */
    break; /* end NC_DOUBLE */
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ip[idx]/=op1.ip[idx];
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ip[idx] != mss_val_ntg) && (op1.ip[idx] != mss_val_ntg)) op2.ip[idx]/=op1.ip[idx]; else op2.ip[idx]=mss_val_ntg;
      } /* end for */
    } /* end else */
    break; /* end NC_INT */
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.sp[idx]/=op1.sp[idx];
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.sp[idx] != mss_val_short) && (op1.sp[idx] != mss_val_short)) op2.sp[idx]/=op1.sp[idx]; else op2.sp[idx]=mss_val_short;
      } /* end for */
    } /* end else */
    break; /* end NC_SHORT */
  case NC_USHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.usp[idx]/=op1.usp[idx];
    }else{
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.usp[idx] != mss_val_ushort) && (op1.usp[idx] != mss_val_ushort)) op2.usp[idx]/=op1.usp[idx]; else op2.usp[idx]=mss_val_ushort;
      } /* end for */
    } /* end else */
    break; /* end NC_USHORT */
  case NC_UINT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.uip[idx]/=op1.uip[idx];
    }else{
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.uip[idx] != mss_val_uint) && (op1.uip[idx] != mss_val_uint)) op2.uip[idx]/=op1.uip[idx]; else op2.uip[idx]=mss_val_uint;
      } /* end for */
    } /* end else */
    break; /* end NC_UINT */
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.i64p[idx]/=op1.i64p[idx];
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.i64p[idx] != mss_val_int64) && (op1.i64p[idx] != mss_val_int64)) op2.i64p[idx]/=op1.i64p[idx]; else op2.i64p[idx]=mss_val_int64;
      } /* end for */
    } /* end else */
    break; /* end NC_INT64 */
  case NC_UINT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ui64p[idx]/=op1.ui64p[idx];
    }else{
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ui64p[idx] != mss_val_uint64) && (op1.ui64p[idx] != mss_val_uint64)) op2.ui64p[idx]/=op1.ui64p[idx]; else op2.ui64p[idx]=mss_val_uint64;
      } /* end for */
    } /* end else */
    break; /* end NC_UINT64 */
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.bp[idx]/=op1.bp[idx];
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.bp[idx] != mss_val_byte) && (op1.bp[idx] != mss_val_byte)) op2.bp[idx]/=op1.bp[idx]; else op2.bp[idx]=mss_val_byte;
      } /* end for */
    } /* end else */
    break; /* end NC_BYTE */
  case NC_UBYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ubp[idx]/=op1.ubp[idx];
    }else{
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ubp[idx] != mss_val_ubyte) && (op1.ubp[idx] != mss_val_ubyte)) op2.ubp[idx]/=op1.ubp[idx]; else op2.ubp[idx]=mss_val_ubyte;
      } /* end for */
    } /* end else */
    break; /* end NC_UBYTE */
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not necessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_dvd() */
void
nco_var_max_bnr /* [fnc] Maximize two operands */
(const nc_type type, /* I [type] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand on input, values of maximum on output */
{
  /* Purpose: Find maximum value(s) of two operands and store result in second operand
     Operands are assumed to conform, be of same specified type, and have values in memory */
  /* Missing-value semantics (per element): if op2 is missing, take op1
     unconditionally (when op1 is also missing this leaves the result missing);
     otherwise take op1 only when op1 is valid and strictly greater than op2 */
  /* NOTE(review): for NC_FLOAT/NC_DOUBLE, comparisons involving NaN are false,
     so a NaN already in op2 is never replaced by this logic---confirm callers
     use explicit missing values rather than NaN if that matters */
  long idx;
  /* Typecast pointer to values before access */
  /* It is not necessary to untype-cast pointer types after using them as we have
     operated on local copies of them */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(!has_mss_val){
      /* No missing value defined: plain element-wise maximum */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.fp[idx] < op1.fp[idx]) op2.fp[idx]=op1.fp[idx];
    }else{
      /* Hoist missing value into scalar so loop body vectorizes */
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.fp[idx] == mss_val_flt)
	  op2.fp[idx]=op1.fp[idx];
	else if((op1.fp[idx] != mss_val_flt) && (op2.fp[idx] < op1.fp[idx]))
	  op2.fp[idx]=op1.fp[idx];
      } /* end for */
    } /* end else */
    break;
  /* Remaining numeric types follow the identical pattern as NC_FLOAT above */
  case NC_DOUBLE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.dp[idx] < op1.dp[idx]) op2.dp[idx]=op1.dp[idx];
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.dp[idx] == mss_val_dbl)
	  op2.dp[idx]=op1.dp[idx];
	else if((op1.dp[idx] != mss_val_dbl) && (op2.dp[idx] < op1.dp[idx]))
	  op2.dp[idx]=op1.dp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ip[idx] < op1.ip[idx]) op2.ip[idx]=op1.ip[idx];
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ip[idx] == mss_val_ntg)
	  op2.ip[idx]=op1.ip[idx];
	else if((op1.ip[idx] != mss_val_ntg) && (op2.ip[idx] < op1.ip[idx]))
	  op2.ip[idx]=op1.ip[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.sp[idx] < op1.sp[idx]) op2.sp[idx]=op1.sp[idx];
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.sp[idx] == mss_val_short)
	  op2.sp[idx]=op1.sp[idx];
	else if((op1.sp[idx] != mss_val_short) && (op2.sp[idx] < op1.sp[idx]))
	  op2.sp[idx]=op1.sp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_USHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.usp[idx] < op1.usp[idx]) op2.usp[idx]=op1.usp[idx];
    }else{
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.usp[idx] == mss_val_ushort)
	  op2.usp[idx]=op1.usp[idx];
	else if((op1.usp[idx] != mss_val_ushort) && (op2.usp[idx] < op1.usp[idx]))
	  op2.usp[idx]=op1.usp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UINT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.uip[idx] < op1.uip[idx]) op2.uip[idx]=op1.uip[idx];
    }else{
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.uip[idx] == mss_val_uint)
	  op2.uip[idx]=op1.uip[idx];
	else if((op1.uip[idx] != mss_val_uint) && (op2.uip[idx] < op1.uip[idx]))
	  op2.uip[idx]=op1.uip[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.i64p[idx] < op1.i64p[idx]) op2.i64p[idx]=op1.i64p[idx];
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.i64p[idx] == mss_val_int64)
	  op2.i64p[idx]=op1.i64p[idx];
	else if((op1.i64p[idx] != mss_val_int64) && (op2.i64p[idx] < op1.i64p[idx]))
	  op2.i64p[idx]=op1.i64p[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UINT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ui64p[idx] < op1.ui64p[idx]) op2.ui64p[idx]=op1.ui64p[idx];
    }else{
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ui64p[idx] == mss_val_uint64)
	  op2.ui64p[idx]=op1.ui64p[idx];
	else if((op1.ui64p[idx] != mss_val_uint64) && (op2.ui64p[idx] < op1.ui64p[idx]))
	  op2.ui64p[idx]=op1.ui64p[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.bp[idx] < op1.bp[idx]) op2.bp[idx]=op1.bp[idx];
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.bp[idx] == mss_val_byte)
	  op2.bp[idx]=op1.bp[idx];
	else if((op1.bp[idx] != mss_val_byte) && (op2.bp[idx] < op1.bp[idx]))
	  op2.bp[idx]=op1.bp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UBYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ubp[idx] < op1.ubp[idx]) op2.ubp[idx]=op1.ubp[idx];
    }else{
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ubp[idx] == mss_val_ubyte)
	  op2.ubp[idx]=op1.ubp[idx];
	else if((op1.ubp[idx] != mss_val_ubyte) && (op2.ubp[idx] < op1.ubp[idx]))
	  op2.ubp[idx]=op1.ubp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
} /* end nco_var_max_bnr() */
void
nco_var_min_bnr /* [fnc] Minimize two operands */
(const nc_type type, /* I [type] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand on input, values of minimum on output */
{
  /* Purpose: Find minimum value(s) of two operands and store result in second operand
     Operands are assumed to conform, be of same specified type, and have values in memory */
  /* Missing-value semantics (per element): if op2 is missing, take op1
     unconditionally (when op1 is also missing this leaves the result missing);
     otherwise take op1 only when op1 is valid and strictly less than op2 */
  /* NOTE(review): for NC_FLOAT/NC_DOUBLE, comparisons involving NaN are false,
     so a NaN already in op2 is never replaced by this logic---confirm callers
     use explicit missing values rather than NaN if that matters */
  long idx;
  /* Typecast pointer to values before access */
  /* It is not necessary to uncast pointer types after using them as we have
     operated on local copies of them */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(!has_mss_val){
      /* No missing value defined: plain element-wise minimum */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.fp[idx] > op1.fp[idx]) op2.fp[idx]=op1.fp[idx];
    }else{
      /* Hoist missing value into scalar so loop body vectorizes */
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.fp[idx] == mss_val_flt)
	  op2.fp[idx]=op1.fp[idx];
	else if((op1.fp[idx] != mss_val_flt) && (op2.fp[idx] > op1.fp[idx]))
	  op2.fp[idx]=op1.fp[idx];
      } /* end for */
    } /* end else */
    break;
  /* Remaining numeric types follow the identical pattern as NC_FLOAT above */
  case NC_DOUBLE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.dp[idx] > op1.dp[idx]) op2.dp[idx]=op1.dp[idx];
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.dp[idx] == mss_val_dbl)
	  op2.dp[idx]=op1.dp[idx];
	else if((op1.dp[idx] != mss_val_dbl) && (op2.dp[idx] > op1.dp[idx]))
	  op2.dp[idx]=op1.dp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ip[idx] > op1.ip[idx]) op2.ip[idx]=op1.ip[idx];
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ip[idx] == mss_val_ntg)
	  op2.ip[idx]=op1.ip[idx];
	else if((op1.ip[idx] != mss_val_ntg) && (op2.ip[idx] > op1.ip[idx]))
	  op2.ip[idx]=op1.ip[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.sp[idx] > op1.sp[idx]) op2.sp[idx]=op1.sp[idx];
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.sp[idx] == mss_val_short)
	  op2.sp[idx]=op1.sp[idx];
	else if((op1.sp[idx] != mss_val_short) && (op2.sp[idx] > op1.sp[idx]))
	  op2.sp[idx]=op1.sp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_USHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.usp[idx] > op1.usp[idx]) op2.usp[idx]=op1.usp[idx];
    }else{
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.usp[idx] == mss_val_ushort)
	  op2.usp[idx]=op1.usp[idx];
	else if((op1.usp[idx] != mss_val_ushort) && (op2.usp[idx] > op1.usp[idx]))
	  op2.usp[idx]=op1.usp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UINT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.uip[idx] > op1.uip[idx]) op2.uip[idx]=op1.uip[idx];
    }else{
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.uip[idx] == mss_val_uint)
	  op2.uip[idx]=op1.uip[idx];
	else if((op1.uip[idx] != mss_val_uint) && (op2.uip[idx] > op1.uip[idx]))
	  op2.uip[idx]=op1.uip[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.i64p[idx] > op1.i64p[idx]) op2.i64p[idx]=op1.i64p[idx];
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.i64p[idx] == mss_val_int64)
	  op2.i64p[idx]=op1.i64p[idx];
	else if((op1.i64p[idx] != mss_val_int64) && (op2.i64p[idx] > op1.i64p[idx]))
	  op2.i64p[idx]=op1.i64p[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UINT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ui64p[idx] > op1.ui64p[idx]) op2.ui64p[idx]=op1.ui64p[idx];
    }else{
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ui64p[idx] == mss_val_uint64)
	  op2.ui64p[idx]=op1.ui64p[idx];
	else if((op1.ui64p[idx] != mss_val_uint64) && (op2.ui64p[idx] > op1.ui64p[idx]))
	  op2.ui64p[idx]=op1.ui64p[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.bp[idx] > op1.bp[idx]) op2.bp[idx]=op1.bp[idx];
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.bp[idx] == mss_val_byte)
	  op2.bp[idx]=op1.bp[idx];
	else if((op1.bp[idx] != mss_val_byte) && (op2.bp[idx] > op1.bp[idx]))
	  op2.bp[idx]=op1.bp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_UBYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++)
	if(op2.ubp[idx] > op1.ubp[idx]) op2.ubp[idx]=op1.ubp[idx];
    }else{
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if(op2.ubp[idx] == mss_val_ubyte)
	  op2.ubp[idx]=op1.ubp[idx];
	else if((op1.ubp[idx] != mss_val_ubyte) && (op2.ubp[idx] > op1.ubp[idx]))
	  op2.ubp[idx]=op1.ubp[idx];
      } /* end for */
    } /* end else */
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
} /* end nco_var_min_bnr() */
void
nco_var_mlt /* [fnc] Multiply first operand by second operand */
(const nc_type type, /* I [type] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of first operand */
 ptr_unn op2) /* I/O [val] Values of second operand on input, values of product on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: multiply value of first operand by value of second operand
     and store result in second operand.
     Assume operands conform, are same type, and are in memory */
  /* Multiplication is currently defined as op2:=op1*op2 */
  /* Missing-value semantics: if either operand equals the missing value,
     the result element is set to the missing value */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(!has_mss_val){
      /* No missing value defined: multiply every element unconditionally */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.fp[idx]*=op1.fp[idx];
    }else{
      /* Hoist missing value into scalar so loop body vectorizes */
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.fp[idx] != mss_val_flt) && (op1.fp[idx] != mss_val_flt)) op2.fp[idx]*=op1.fp[idx]; else op2.fp[idx]=mss_val_flt;
      } /* end for */
    } /* end else */
    break;
  /* Remaining numeric types follow the identical pattern as NC_FLOAT above */
  case NC_DOUBLE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.dp[idx]*=op1.dp[idx];
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.dp[idx] != mss_val_dbl) && (op1.dp[idx] != mss_val_dbl)) op2.dp[idx]*=op1.dp[idx]; else op2.dp[idx]=mss_val_dbl;
      } /* end for */
    } /* end else */
    break;
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ip[idx]*=op1.ip[idx];
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ip[idx] != mss_val_ntg) && (op1.ip[idx] != mss_val_ntg)) op2.ip[idx]*=op1.ip[idx]; else op2.ip[idx]=mss_val_ntg;
      } /* end for */
    } /* end else */
    break;
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.sp[idx]*=op1.sp[idx];
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.sp[idx] != mss_val_short) && (op1.sp[idx] != mss_val_short)) op2.sp[idx]*=op1.sp[idx]; else op2.sp[idx]=mss_val_short;
      } /* end for */
    } /* end else */
    break;
  case NC_USHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.usp[idx]*=op1.usp[idx];
    }else{
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.usp[idx] != mss_val_ushort) && (op1.usp[idx] != mss_val_ushort)) op2.usp[idx]*=op1.usp[idx]; else op2.usp[idx]=mss_val_ushort;
      } /* end for */
    } /* end else */
    break;
  case NC_UINT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.uip[idx]*=op1.uip[idx];
    }else{
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.uip[idx] != mss_val_uint) && (op1.uip[idx] != mss_val_uint)) op2.uip[idx]*=op1.uip[idx]; else op2.uip[idx]=mss_val_uint;
      } /* end for */
    } /* end else */
    break;
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.i64p[idx]*=op1.i64p[idx];
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.i64p[idx] != mss_val_int64) && (op1.i64p[idx] != mss_val_int64)) op2.i64p[idx]*=op1.i64p[idx]; else op2.i64p[idx]=mss_val_int64;
      } /* end for */
    } /* end else */
    break;
  case NC_UINT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ui64p[idx]*=op1.ui64p[idx];
    }else{
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ui64p[idx] != mss_val_uint64) && (op1.ui64p[idx] != mss_val_uint64)) op2.ui64p[idx]*=op1.ui64p[idx]; else op2.ui64p[idx]=mss_val_uint64;
      } /* end for */
    } /* end else */
    break;
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.bp[idx]*=op1.bp[idx];
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.bp[idx] != mss_val_byte) && (op1.bp[idx] != mss_val_byte)) op2.bp[idx]*=op1.bp[idx]; else op2.bp[idx]=mss_val_byte;
      } /* end for */
    } /* end else */
    break;
  case NC_UBYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ubp[idx]*=op1.ubp[idx];
    }else{
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ubp[idx] != mss_val_ubyte) && (op1.ubp[idx] != mss_val_ubyte)) op2.ubp[idx]*=op1.ubp[idx]; else op2.ubp[idx]=mss_val_ubyte;
      } /* end for */
    } /* end else */
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not necessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_mlt() */
void
nco_var_mod /* [fnc] Remainder (modulo) operation of two variables */
(const nc_type type, /* I [type] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operands */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [flg] Value of missing value */
 ptr_unn op1, /* I [val] Values of field */
 ptr_unn op2) /* I/O [val] Values of divisor on input, values of remainder on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Divide value of first operand by value of second operand
     and store remainder in second operand.
     Assume operands conform, are same type, and are in memory */
  /* Remainder (modulo) operation is currently defined as op2:=op1%op2 */
  /* NB: When has_mss_val is set, an element is computed only if BOTH operands
     differ from the missing value; otherwise op2 is set to the missing value,
     i.e., missing data propagate through the operation */
  /* NOTE(review): Zero divisors are not screened: integer % by zero is
     undefined behavior and floating-point division by zero yields Inf/NaN.
     Presumably callers mask zero divisors beforehand---TODO confirm */
  long idx;
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&op2);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT: /* Hand-code modulo operator for floating point arguments (intrinsic % requires integer arguments) */
    /* NOTE(review): quotient is truncated with a cast to int, so the result is
       suspect when |op1/op2| exceeds INT_MAX (the overflowing conversion is
       undefined behavior); fmodf() would avoid this---TODO confirm intent */
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.fp[idx]=op1.fp[idx]-op2.fp[idx]*(int)(op1.fp[idx]/op2.fp[idx]);
    }else{
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.fp[idx] != mss_val_flt) && (op1.fp[idx] != mss_val_flt)) op2.fp[idx]=op1.fp[idx]-op2.fp[idx]*(int)(op1.fp[idx]/op2.fp[idx]); else op2.fp[idx]=mss_val_flt;
      } /* end for */
    } /* end else */
    break; /* end NC_FLOAT */
  case NC_DOUBLE: /* Hand-code modulo operator for floating point arguments (intrinsic % requires integer arguments) */
    /* NOTE(review): same int-truncation caveat as NC_FLOAT above */
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.dp[idx]=op1.dp[idx]-op2.dp[idx]*(int)(op1.dp[idx]/op2.dp[idx]);
    }else{
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.dp[idx] != mss_val_dbl) && (op1.dp[idx] != mss_val_dbl)) op2.dp[idx]=op1.dp[idx]-op2.dp[idx]*(int)(op1.dp[idx]/op2.dp[idx]); else op2.dp[idx]=mss_val_dbl;
      } /* end for */
    } /* end else */
    break; /* end NC_DOUBLE */
  case NC_INT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ip[idx]=op1.ip[idx]%op2.ip[idx];
    }else{
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ip[idx] != mss_val_ntg) && (op1.ip[idx] != mss_val_ntg)) op2.ip[idx]=op1.ip[idx]%op2.ip[idx]; else op2.ip[idx]=mss_val_ntg;
      } /* end for */
    } /* end else */
    break; /* end NC_INT */
  case NC_SHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.sp[idx]=op1.sp[idx]%op2.sp[idx];
    }else{
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.sp[idx] != mss_val_short) && (op1.sp[idx] != mss_val_short)) op2.sp[idx]=op1.sp[idx]%op2.sp[idx]; else op2.sp[idx]=mss_val_short;
      } /* end for */
    } /* end else */
    break; /* end NC_SHORT */
  case NC_USHORT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.usp[idx]=op1.usp[idx]%op2.usp[idx];
    }else{
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.usp[idx] != mss_val_ushort) && (op1.usp[idx] != mss_val_ushort)) op2.usp[idx]=op1.usp[idx]%op2.usp[idx]; else op2.usp[idx]=mss_val_ushort;
      } /* end for */
    } /* end else */
    break; /* end NC_USHORT */
  case NC_UINT:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.uip[idx]=op1.uip[idx]%op2.uip[idx];
    }else{
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.uip[idx] != mss_val_uint) && (op1.uip[idx] != mss_val_uint)) op2.uip[idx]=op1.uip[idx]%op2.uip[idx]; else op2.uip[idx]=mss_val_uint;
      } /* end for */
    } /* end else */
    break; /* end NC_UINT */
  case NC_INT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.i64p[idx]=op1.i64p[idx]%op2.i64p[idx];
    }else{
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.i64p[idx] != mss_val_int64) && (op1.i64p[idx] != mss_val_int64)) op2.i64p[idx]=op1.i64p[idx]%op2.i64p[idx]; else op2.i64p[idx]=mss_val_int64;
      } /* end for */
    } /* end else */
    break; /* end NC_INT64 */
  case NC_UINT64:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ui64p[idx]=op1.ui64p[idx]%op2.ui64p[idx];
    }else{
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ui64p[idx] != mss_val_uint64) && (op1.ui64p[idx] != mss_val_uint64)) op2.ui64p[idx]=op1.ui64p[idx]%op2.ui64p[idx]; else op2.ui64p[idx]=mss_val_uint64;
      } /* end for */
    } /* end else */
    break; /* end NC_UINT64 */
  case NC_BYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.bp[idx]=op1.bp[idx]%op2.bp[idx];
    }else{
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.bp[idx] != mss_val_byte) && (op1.bp[idx] != mss_val_byte)) op2.bp[idx]=op1.bp[idx]%op2.bp[idx]; else op2.bp[idx]=mss_val_byte;
      } /* end for */
    } /* end else */
    break; /* end NC_BYTE */
  case NC_UBYTE:
    if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) op2.ubp[idx]=op1.ubp[idx]%op2.ubp[idx];
    }else{
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++){
	if((op2.ubp[idx] != mss_val_ubyte) && (op1.ubp[idx] != mss_val_ubyte)) op2.ubp[idx]=op1.ubp[idx]%op2.ubp[idx]; else op2.ubp[idx]=mss_val_ubyte;
      } /* end for */
    } /* end else */
    break; /* end NC_UBYTE */
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_mod() */
void
nco_var_msk /* [fnc] Mask third operand where first and second operands fail comparison */
(const nc_type type, /* I [enm] netCDF type of operands */
 const long sz, /* I [nbr] Size (in elements) of operand op3 */
 const int has_mss_val, /* I [flg] Flag for missing values (basically assumed to be true) */
 ptr_unn mss_val, /* I [val] Value of missing value */
 const double op1, /* I [val] Target value to compare against mask field (i.e., argument of -M) */
 const int op_typ_rlt, /* I [enm] Comparison type test for op2 and op1 */
 ptr_unn op2, /* I [val] Value of mask field */
 ptr_unn op3) /* I/O [val] Values of second operand on input, masked values on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Mask third operand where first and second operands fail comparison
     Set third operand to missing value wherever second operand fails comparison with first operand */
  /* Masking is currently defined as: if(op2 !op_typ_rlt op1) then op3:=mss_val
     i.e., each inner loop tests the NEGATION of op_typ_rlt and overwrites
     op3 with the missing value where that negated test succeeds */
  long idx;
  /* CEWI initializers silence compiler warnings for types not taken by the switch below */
  double mss_val_dbl=double_CEWI;
  float mss_val_flt=float_CEWI;
  nco_int mss_val_ntg=nco_int_CEWI;
  nco_short mss_val_short=nco_short_CEWI;
  nco_ushort mss_val_ushort=nco_ushort_CEWI;
  nco_uint mss_val_uint=nco_uint_CEWI;
  nco_int64 mss_val_int64=nco_int64_CEWI;
  nco_uint64 mss_val_uint64=nco_uint64_CEWI;
  nco_byte mss_val_byte=nco_byte_CEWI;
  nco_ubyte mss_val_ubyte=nco_ubyte_CEWI;
  nco_char mss_val_char=nco_char_CEWI;
  /* nco_string mss_val_string=nco_string_CEWI;*/ /* 20120206: mss_val_string is not yet used so do not define */
  /* Typecast pointer to values before access */
  (void)cast_void_nctype(type,&op2);
  (void)cast_void_nctype(type,&op3);
  if(has_mss_val){
    (void)cast_void_nctype(type,&mss_val);
  }else{
    /* Routine writes mss_val into op3, so a missing value is mandatory */
    (void)fprintf(stdout,"%s: ERROR has_mss_val is inconsistent with purpose of nco_var_msk(), i.e., has_mss_val is not True\n",nco_prg_nm_get());
    nco_exit(EXIT_FAILURE);
  } /* end else */
  /* has_mss_val is guaranteed true past this point (else branch above exits),
     so extract the typed missing value unconditionally */
  switch(type){
  case NC_FLOAT: mss_val_flt=*mss_val.fp; break;
  case NC_DOUBLE: mss_val_dbl=*mss_val.dp; break;
  case NC_INT: mss_val_ntg=*mss_val.ip; break;
  case NC_SHORT: mss_val_short=*mss_val.sp; break;
  case NC_USHORT: mss_val_ushort=*mss_val.usp; break;
  case NC_UINT: mss_val_uint=*mss_val.uip; break;
  case NC_INT64: mss_val_int64=*mss_val.i64p; break;
  case NC_UINT64: mss_val_uint64=*mss_val.ui64p; break;
  case NC_BYTE: mss_val_byte=*mss_val.bp; break;
  case NC_UBYTE: mss_val_ubyte=*mss_val.ubp; break;
  case NC_CHAR: mss_val_char=*mss_val.cp; break;
  /* case NC_STRING: mss_val_string=*mss_val.sngp; break;*/ /* 20120206: mss_val_string is not yet used so do not define */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: Explicit coercion when comparing op2 to op1 is necessary */
  /* NOTE(review): the inner op_typ_rlt switches have no default case, so an
     unrecognized comparison enum silently masks nothing---presumably callers
     validate op_typ_rlt upstream; confirm */
  switch(type){
  case NC_FLOAT:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] != (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] == (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] >= (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] <= (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] > (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.fp[idx] < (float)op1) op3.fp[idx]=mss_val_flt;
      break;
    } /* end switch */
    break;
  case NC_DOUBLE:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] != (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] == (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] >= (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] <= (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] > (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.dp[idx] < (double)op1) op3.dp[idx]=mss_val_dbl;
      break;
    } /* end switch */
    break;
  case NC_INT:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] != (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] == (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] >= (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] <= (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] > (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ip[idx] < (nco_int)op1) op3.ip[idx]=mss_val_ntg;
      break;
    } /* end switch */
    break;
  case NC_SHORT:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] != (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] == (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] >= (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] <= (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] > (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.sp[idx] < (nco_short)op1) op3.sp[idx]=mss_val_short;
      break;
    } /* end switch */
    break;
  case NC_USHORT:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] != (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] == (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] >= (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] <= (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] > (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.usp[idx] < (nco_ushort)op1) op3.usp[idx]=mss_val_ushort;
      break;
    } /* end switch */
    break;
  case NC_UINT:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] != (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] == (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] >= (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] <= (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] > (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.uip[idx] < (nco_uint)op1) op3.uip[idx]=mss_val_uint;
      break;
    } /* end switch */
    break;
  case NC_INT64:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] != (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] == (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] >= (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] <= (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] > (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.i64p[idx] < (nco_int64)op1) op3.i64p[idx]=mss_val_int64;
      break;
    } /* end switch */
    break;
  case NC_UINT64:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] != (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] == (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] >= (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] <= (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] > (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ui64p[idx] < (nco_uint64)op1) op3.ui64p[idx]=mss_val_uint64;
      break;
    } /* end switch */
    break;
  case NC_BYTE:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] != (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] == (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] >= (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] <= (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] > (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.bp[idx] < (nco_byte)op1) op3.bp[idx]=mss_val_byte;
      break;
    } /* end switch */
    break;
  case NC_UBYTE:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] != (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] == (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] >= (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] <= (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] > (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.ubp[idx] < (nco_ubyte)op1) op3.ubp[idx]=mss_val_ubyte;
      break;
    } /* end switch */
    break;
  case NC_CHAR:
    switch(op_typ_rlt){
    case nco_op_eq:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] != (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    case nco_op_ne:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] == (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    case nco_op_lt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] >= (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    case nco_op_gt:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] <= (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    case nco_op_le:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] > (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    case nco_op_ge:
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(op2.cp[idx] < (nco_char)op1) op3.cp[idx]=mss_val_char;
      break;
    } /* end switch */
    break;
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* It is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_msk() */
void
nco_var_tll_zro_mss_val /* [fnc] Write missing value into elements with zero tally */
(const nc_type type, /* I [enm] netCDF type of operand */
 const long sz, /* I [nbr] Size (in elements) of operand */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [val] Value of missing value */
 const long * const tally, /* I [nbr] Counter to normalize by */
 ptr_unn op1) /* I/O [val] Values of first operand on input, possibly missing values on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Write missing value into elements with zero tally
     Routine is necessary because initialization of accumulating sums (specified, e.g., with -y ttl or with -N)
     sets initial sum to zero (so augmenting works) regardless if first slice is missing.
     Such sums are usually normalized and set to missing if tally is zero.
     However, totals are integrals and thus are never normalized.
     Initialization value of zero will be output even if tally is zero,
     _unless field is processed with this routine after summing and prior to writing_ */
  /* Filter currently works as op1:=mss_val where tally == 0 */
  long idx;
  /* Routine changes nothing unless a missing value is defined */
  if(!has_mss_val) return;
  /* Typecast pointer to values before access
     NB: early return above guarantees has_mss_val is true here,
     so mss_val may be cast unconditionally */
  (void)cast_void_nctype(type,&op1);
  (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    {
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.fp[idx]=mss_val_flt;
    }
    break;
  case NC_DOUBLE:
    {
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.dp[idx]=mss_val_dbl;
    }
    break;
  case NC_INT:
    {
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.ip[idx]=mss_val_ntg;
    }
    break;
  case NC_SHORT:
    {
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.sp[idx]=mss_val_short;
    }
    break;
  case NC_USHORT:
    {
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.usp[idx]=mss_val_ushort;
    }
    break;
  case NC_UINT:
    {
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.uip[idx]=mss_val_uint;
    }
    break;
  case NC_INT64:
    {
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.i64p[idx]=mss_val_int64;
    }
    break;
  case NC_UINT64:
    {
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.ui64p[idx]=mss_val_uint64;
    }
    break;
  case NC_BYTE:
    {
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.bp[idx]=mss_val_byte;
    }
    break;
  case NC_UBYTE:
    {
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(idx=0;idx<sz;idx++) if(tally[idx] == 0L) op1.ubp[idx]=mss_val_ubyte;
    }
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: it is not neccessary to un-typecast pointers to values after access
     because we have only operated on local copies of them. */
} /* end nco_var_tll_zro_mss_val() */
void
nco_var_nrm /* [fnc] Normalize value of first operand by count in tally array */
(const nc_type type, /* I [enm] netCDF type of operand */
 const long sz, /* I [nbr] Size (in elements) of operand */
 const int has_mss_val, /* I [flg] Flag for missing values */
 ptr_unn mss_val, /* I [val] Value of missing value */
 const long * const tally, /* I [nbr] Counter to normalize by */
 ptr_unn op1) /* I/O [val] Values of first operand on input, normalized result on output */
{
  /* Threads: Routine is thread safe and calls no unsafe routines */
  /* Purpose: Normalize value of first operand by count in tally array
     Normalization is defined as op1:=op1/tally, performed in-place.
     When a missing value is defined, elements whose tally is zero receive
     the missing value instead of being divided. */
  long lmn; /* [idx] Element index */
  /* De-reference void pointers into type-specific pointers before use */
  (void)cast_void_nctype(type,&op1);
  if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
  switch(type){
  case NC_FLOAT:
    if(has_mss_val){
      const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.fp[lmn]/=tally[lmn]; else op1.fp[lmn]=mss_val_flt;
    }else{
      /* Operations: 1 fp divide, 2 pointer offset, 2 user memory fetch
	 Repetitions: \dmnszavg^(\dmnnbr-\avgnbr)
	 Total Counts: \flpnbr=\dmnszavg^(\dmnnbr-\avgnbr), \rthnbr=2\dmnszavg^(\dmnnbr-\avgnbr), \mmrusrnbr=2\dmnszavg^(\dmnnbr-\avgnbr)
	 NB: Counted LHS+RHS+tally offsets and fetches */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.fp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_DOUBLE:
    if(has_mss_val){
      const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.dp[lmn]/=tally[lmn]; else op1.dp[lmn]=mss_val_dbl;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.dp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_INT:
    if(has_mss_val){
      const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.ip[lmn]/=tally[lmn]; else op1.ip[lmn]=mss_val_ntg;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.ip[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_SHORT:
    if(has_mss_val){
      const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.sp[lmn]/=tally[lmn]; else op1.sp[lmn]=mss_val_short;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.sp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_USHORT:
    if(has_mss_val){
      const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.usp[lmn]/=tally[lmn]; else op1.usp[lmn]=mss_val_ushort;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.usp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_UINT:
    if(has_mss_val){
      const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.uip[lmn]/=tally[lmn]; else op1.uip[lmn]=mss_val_uint;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.uip[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_INT64:
    if(has_mss_val){
      const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.i64p[lmn]/=tally[lmn]; else op1.i64p[lmn]=mss_val_int64;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.i64p[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_UINT64:
    if(has_mss_val){
      const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.ui64p[lmn]/=tally[lmn]; else op1.ui64p[lmn]=mss_val_uint64;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.ui64p[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_BYTE:
    if(has_mss_val){
      const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.bp[lmn]/=tally[lmn]; else op1.bp[lmn]=mss_val_byte;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.bp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_UBYTE:
    if(has_mss_val){
      const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) if(tally[lmn] != 0L) op1.ubp[lmn]/=tally[lmn]; else op1.ubp[lmn]=mss_val_ubyte;
    }else{
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
      for(lmn=0;lmn<sz;lmn++) op1.ubp[lmn]/=tally[lmn];
    } /* !has_mss_val */
    break;
  case NC_CHAR: break; /* Do nothing */
  case NC_STRING: break; /* Do nothing */
  default: nco_dfl_case_nc_type_err(); break;
  } /* end switch */
  /* NB: pointers were copied by value, so no un-cast is required */
} /* end nco_var_nrm() */
void
nco_var_nrm_sdn /* [fnc] Normalize value of first operand by count-1 in tally array */
(const nc_type type, /* I [enm] netCDF type of operand */
const long sz, /* I [nbr] Size (in elements) of operand */
const int has_mss_val, /* I [flg] Flag for missing values */
ptr_unn mss_val, /* I [val] Value of missing value */
const long * const tally, /* I [nbr] Counter to normalize by */
ptr_unn op1) /* I/O [val] Values of first operand on input, normalized result on output */
{
/* Purpose: Normalize value of first operand by count-1 in tally array */
/* Normalization is currently defined as op1:=op1/(--tally) */
/* nco_var_nrm_sdn() is based on nco_var_nrm() and algorithms should be kept consistent with eachother */
/* Differences from nco_var_nrm(): divisor is tally-1 rather than tally, and the
missing-value guard is tally > 1 rather than tally != 0, i.e., at least two
valid samples are required to produce a normalized value (the "sdn" variant,
presumably for sample/unbiased normalization -- confirm against callers).
NOTE(review): in the !has_mss_val branches nothing guards against
tally[idx] == 1, which divides by zero (Inf/NaN for floating point,
undefined behavior for integer types) -- presumably callers guarantee
tally >= 2 when has_mss_val is false; verify. */
long idx;
/* Typecast pointer to values before access */
(void)cast_void_nctype(type,&op1);
if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
/* One case per numeric type; every case is identical except for the
ptr_unn member accessed */
switch(type){
case NC_FLOAT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.fp[idx]/=tally[idx]-1L;
}else{
/* Local copy of missing value reduces pointer de-referencing inside loop */
const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.fp[idx]/=tally[idx]-1L; else op1.fp[idx]=mss_val_flt;
} /* end else */
break;
case NC_DOUBLE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.dp[idx]/=tally[idx]-1L;
}else{
const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.dp[idx]/=tally[idx]-1L; else op1.dp[idx]=mss_val_dbl;
} /* end else */
break;
case NC_INT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.ip[idx]/=tally[idx]-1L;
}else{
const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.ip[idx]/=tally[idx]-1L; else op1.ip[idx]=mss_val_ntg;
} /* end else */
break;
case NC_SHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.sp[idx]/=tally[idx]-1L;
}else{
const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.sp[idx]/=tally[idx]-1L; else op1.sp[idx]=mss_val_short;
} /* end else */
break;
case NC_USHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.usp[idx]/=tally[idx]-1L;
}else{
const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.usp[idx]/=tally[idx]-1L; else op1.usp[idx]=mss_val_ushort;
} /* end else */
break;
case NC_UINT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.uip[idx]/=tally[idx]-1L;
}else{
const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.uip[idx]/=tally[idx]-1L; else op1.uip[idx]=mss_val_uint;
} /* end else */
break;
case NC_INT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.i64p[idx]/=tally[idx]-1L;
}else{
const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.i64p[idx]/=tally[idx]-1L; else op1.i64p[idx]=mss_val_int64;
} /* end else */
break;
case NC_UINT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.ui64p[idx]/=tally[idx]-1L;
}else{
const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.ui64p[idx]/=tally[idx]-1L; else op1.ui64p[idx]=mss_val_uint64;
} /* end else */
break;
case NC_BYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.bp[idx]/=tally[idx]-1L;
}else{
const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.bp[idx]/=tally[idx]-1L; else op1.bp[idx]=mss_val_byte;
} /* end else */
break;
case NC_UBYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op1.ubp[idx]/=tally[idx]-1L;
}else{
const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] > 1L) op1.ubp[idx]/=tally[idx]-1L; else op1.ubp[idx]=mss_val_ubyte;
} /* end else */
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
} /* end of nco_var_nrm_sdn */
void
nco_var_nrm_wgt /* [fnc] Normalize value of first operand by weight array */
(const nc_type type, /* I [enm] netCDF type of operand */
const long sz, /* I [nbr] Size (in elements) of operand */
const int has_mss_val, /* I [flg] Flag for missing values */
ptr_unn mss_val, /* I [val] Value of missing value */
const long * const tally, /* I [nbr] Counter to normalize by */
const double * const wgt, /* I [nbr] Weight to normalize by */
ptr_unn op1) /* I/O [val] Values of first operand on input, normalized result on output */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Normalize value of first operand by value in weight array
Routine is only called by ncra/ncea for variables that have missing values and weights */
/* Normalization is currently defined as op1:=op1/wgt */
/* NOTE(review): code below actually computes op1*=tally/wgt, i.e., divides by
the per-element mean weight wgt/tally -- presumably wgt holds accumulated
(summed) weights so this equals division by the average weight; confirm
against callers and reconcile with the formula comment above */
/* NOTE(review): mss_val is dereferenced unconditionally in every case below,
so callers must always supply a valid missing value (has_mss_val assumed
true, consistent with the Purpose comment) */
long idx;
/* Typecast pointer to values before access */
(void)cast_void_nctype(type,&op1);
if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
/* Elements whose tally is zero (no valid samples) are set to the missing value */
switch(type){
case NC_FLOAT:
{
const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.fp[idx]*=tally[idx]/wgt[idx]; else op1.fp[idx]=mss_val_flt;
}
break;
case NC_DOUBLE:
{
const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.dp[idx]*=tally[idx]/wgt[idx]; else op1.dp[idx]=mss_val_dbl;
}
break;
case NC_INT:
{
const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.ip[idx]*=tally[idx]/wgt[idx]; else op1.ip[idx]=mss_val_ntg;
}
break;
case NC_SHORT:
{
const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.sp[idx]*=tally[idx]/wgt[idx]; else op1.sp[idx]=mss_val_short;
}
break;
case NC_USHORT:
{
const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.usp[idx]*=tally[idx]/wgt[idx]; else op1.usp[idx]=mss_val_ushort;
}
break;
case NC_UINT:
{
const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.uip[idx]*=tally[idx]/wgt[idx]; else op1.uip[idx]=mss_val_uint;
}
break;
case NC_INT64:
{
const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.i64p[idx]*=tally[idx]/wgt[idx]; else op1.i64p[idx]=mss_val_int64;
}
break;
case NC_UINT64:
{
const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.ui64p[idx]*=tally[idx]/wgt[idx]; else op1.ui64p[idx]=mss_val_uint64;
}
break;
case NC_BYTE:
{
const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.bp[idx]*=tally[idx]/wgt[idx]; else op1.bp[idx]=mss_val_byte;
}
break;
case NC_UBYTE:
{
const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) if(tally[idx] != 0L) op1.ubp[idx]*=tally[idx]/wgt[idx]; else op1.ubp[idx]=mss_val_ubyte;
}
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
} /* end nco_var_nrm_wgt() */
void
nco_var_pwr /* [fnc] Raise first operand to power of second operand */
(const nc_type type, /* I [type] netCDF type of operands */
const long sz, /* I [nbr] Size (in elements) of operands */
const int has_mss_val, /* I [flg] Flag for missing values */
ptr_unn mss_val, /* I [flg] Value of missing value */
ptr_unn op1, /* I [val] Values of base */
ptr_unn op2) /* I/O [val] Values of exponent on input, values of power on output */
{
/* Threads: Routine is thread safe and calls no unsafe routines */
/* Purpose: Raise value of first operand to power of second operand
and store result in second operand.
Assume operands conform, are same type, and are in memory */
/* Em-powering is currently defined as op2:=op1^op2 */
long idx;
/* Typecast pointer to values before access */
(void)cast_void_nctype(type,&op1);
(void)cast_void_nctype(type,&op2);
if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
switch(type){
case NC_FLOAT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.fp[idx]=powf(op1.fp[idx],op2.fp[idx]);
}else{
float mss_val_flt=*mss_val.fp; /* Temporary variable reduces de-referencing */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
/* Result is missing if either base or exponent is missing */
if((op1.fp[idx] != mss_val_flt) && (op2.fp[idx] != mss_val_flt)) op2.fp[idx]=powf(op1.fp[idx],op2.fp[idx]); else op2.fp[idx]=mss_val_flt;
} /* end for */
} /* end else */
break; /* end NC_FLOAT */
case NC_DOUBLE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.dp[idx]=pow(op1.dp[idx],op2.dp[idx]);
}else{
double mss_val_dbl=*mss_val.dp; /* Temporary variable reduces de-referencing */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op1.dp[idx] != mss_val_dbl) && (op2.dp[idx] != mss_val_dbl)) op2.dp[idx]=pow(op1.dp[idx],op2.dp[idx]); else op2.dp[idx]=mss_val_dbl;
} /* end for */
} /* end else */
break; /* end NC_DOUBLE */
/* Exponentiation of integer types is not yet supported (TODO #311)
All integer cases fall through to one shared diagnostic and leave op2 unmodified */
case NC_INT:
case NC_SHORT:
case NC_USHORT:
case NC_UINT:
case NC_INT64:
case NC_UINT64:
case NC_BYTE:
case NC_UBYTE:
(void)fprintf(stdout,"%s: ERROR Attempt to em-power integer type in nco_var_pwr(). See TODO #311.\n",nco_prg_nm_get());
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
} /* end nco_var_pwr */
void
nco_var_sbt /* [fnc] Subtract first operand from second operand */
(const nc_type type, /* I [type] netCDF type of operands */
const long sz, /* I [nbr] Size (in elements) of operands */
const int has_mss_val, /* I [flg] Flag for missing values */
ptr_unn mss_val, /* I [flg] Value of missing value */
ptr_unn op1, /* I [val] Values of first operand */
ptr_unn op2) /* I/O [val] Values of second operand on input, values of difference on output */
{
/* Purpose: Subtract value of first operand from value of second operand
and store result in second operand.
Assume operands conform, are same type, and are in memory */
/* Subtraction is currently defined as op2:=op2-op1 */
/* If either element is the missing value, the result is the missing value */
const char fnc_nm[]="nco_var_sbt()"; /* [sng] Function name */
long idx;
/* Typecast pointer to values before access */
(void)cast_void_nctype(type,&op1);
(void)cast_void_nctype(type,&op2);
if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
/* 20200826 SIMD timer code
Invoke with, e.g.,
ncbo -O --dbg=2 ${DATA}/bm/eamv1_ne30np4l72.nc ${DATA}/bm/eamv1_ne30np4l72.nc ~/foo.nc */
/* NOTE(review): the static accumulator below makes the timing diagnostic
(active only when dbg >= nco_dbg_fl) non-reentrant and not thread-safe;
confirm this routine is not timed from threaded regions */
static double tm_ttl=0.0;
clock_t tm_srt;
clock_t tm_end;
double tm_drn;
if(nco_dbg_lvl_get() >= nco_dbg_fl){
tm_srt=clock();
} /* !dbg */
switch(type){
case NC_FLOAT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.fp[idx]-=op1.fp[idx];
}else{
/* Local copy of missing value reduces pointer de-referencing inside loop */
const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.fp[idx] != mss_val_flt) && (op1.fp[idx] != mss_val_flt)) op2.fp[idx]-=op1.fp[idx]; else op2.fp[idx]=mss_val_flt;
} /* end for */
} /* end else */
break;
case NC_DOUBLE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.dp[idx]-=op1.dp[idx];
}else{
const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.dp[idx] != mss_val_dbl) && (op1.dp[idx] != mss_val_dbl)) op2.dp[idx]-=op1.dp[idx]; else op2.dp[idx]=mss_val_dbl;
} /* end for */
} /* end else */
break;
case NC_INT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.ip[idx]-=op1.ip[idx];
}else{
const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.ip[idx] != mss_val_ntg) && (op1.ip[idx] != mss_val_ntg)) op2.ip[idx]-=op1.ip[idx]; else op2.ip[idx]=mss_val_ntg;
} /* end for */
} /* end else */
break;
case NC_SHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.sp[idx]-=op1.sp[idx];
}else{
const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.sp[idx] != mss_val_short) && (op1.sp[idx] != mss_val_short)) op2.sp[idx]-=op1.sp[idx]; else op2.sp[idx]=mss_val_short;
} /* end for */
} /* end else */
break;
case NC_USHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.usp[idx]-=op1.usp[idx];
}else{
const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.usp[idx] != mss_val_ushort) && (op1.usp[idx] != mss_val_ushort)) op2.usp[idx]-=op1.usp[idx]; else op2.usp[idx]=mss_val_ushort;
} /* end for */
} /* end else */
break;
case NC_UINT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.uip[idx]-=op1.uip[idx];
}else{
const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.uip[idx] != mss_val_uint) && (op1.uip[idx] != mss_val_uint)) op2.uip[idx]-=op1.uip[idx]; else op2.uip[idx]=mss_val_uint;
} /* end for */
} /* end else */
break;
case NC_INT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.i64p[idx]-=op1.i64p[idx];
}else{
const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.i64p[idx] != mss_val_int64) && (op1.i64p[idx] != mss_val_int64)) op2.i64p[idx]-=op1.i64p[idx]; else op2.i64p[idx]=mss_val_int64;
} /* end for */
} /* end else */
break;
case NC_UINT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.ui64p[idx]-=op1.ui64p[idx];
}else{
const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.ui64p[idx] != mss_val_uint64) && (op1.ui64p[idx] != mss_val_uint64)) op2.ui64p[idx]-=op1.ui64p[idx]; else op2.ui64p[idx]=mss_val_uint64;
} /* end for */
} /* end else */
break;
case NC_BYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.bp[idx]-=op1.bp[idx];
}else{
const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.bp[idx] != mss_val_byte) && (op1.bp[idx] != mss_val_byte)) op2.bp[idx]-=op1.bp[idx]; else op2.bp[idx]=mss_val_byte;
} /* end for */
} /* end else */
break;
case NC_UBYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++) op2.ubp[idx]-=op1.ubp[idx];
}else{
const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if((op2.ubp[idx] != mss_val_ubyte) && (op1.ubp[idx] != mss_val_ubyte)) op2.ubp[idx]-=op1.ubp[idx]; else op2.ubp[idx]=mss_val_ubyte;
} /* end for */
} /* end else */
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
/* 20200826 SIMD timer code */
if(nco_dbg_lvl_get() >= nco_dbg_fl){
if(tm_ttl == 0.0){
/* Print seen/unseen message only once per invocation */
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
(void)fprintf(stdout,"%s: %s reports C-compiler sees #pragma omp simd (because __GNUC__ >= 8 or __clang_major__ >= 8)\n",nco_prg_nm_get(),fnc_nm);
#else /* !__GNUC__ */
(void)fprintf(stdout,"%s: %s reports C-compiler does not see #pragma omp simd\n",nco_prg_nm_get(),fnc_nm);
#endif /* !__GNUC__ */
} /* !tm_ttl */
tm_end=clock();
/* clock() measures CPU time; convert elapsed ticks to microseconds */
tm_drn=1.0e6*(tm_end-tm_srt)/CLOCKS_PER_SEC;
tm_ttl+=tm_drn;
(void)fprintf(stdout,"%s: %s reports elapsed time in function is %g us\n",nco_prg_nm_get(),fnc_nm,tm_ttl);
} /* !dbg */
} /* !nco_var_sbt() */
void
nco_var_sqrt /* [fnc] Place squareroot of first operand in value of second operand */
(const nc_type type, /* I [enm] netCDF type of operand */
const long sz, /* I [nbr] Size (in elements) of operand */
const int has_mss_val, /* I [flg] Flag for missing values */
ptr_unn mss_val, /* I [val] Value of missing value */
long * restrict const tally, /* I/O [nbr] Counter space */
ptr_unn op1, /* I [val] Values of first operand */
ptr_unn op2) /* O [val] Squareroot of first operand */
{
/* Purpose: Place squareroot of first operand in value of second operand
Assume operands conform, are same type, and are in memory */
/* Square root is currently defined as op2:=sqrt(op1) */
/* tally is incremented once per element processed (per non-missing element
when has_mss_val is set); tally is accumulated, not reset, so callers
presumably zero it beforehand -- verify against callers */
/* NOTE(review): when has_mss_val is set, op2 elements corresponding to
missing op1 values are left unmodified (not set to the missing value);
callers presumably initialize op2 or consult tally -- verify */
/* NB: Many compilers need to #include "nco_rth_flt.h" for sqrtf() prototype */
long idx;
/* Typecast pointer to values before access */
(void)cast_void_nctype(type,&op1);
(void)cast_void_nctype(type,&op2);
if(has_mss_val) (void)cast_void_nctype(type,&mss_val);
/* Integer types compute in double precision then truncate back to the
integer type */
switch(type){
case NC_FLOAT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.fp[idx]=sqrtf(op1.fp[idx]);
tally[idx]++;
} /* end for */
}else{
const float mss_val_flt=*mss_val.fp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.fp[idx] != mss_val_flt){
op2.fp[idx]=sqrtf(op1.fp[idx]);
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_DOUBLE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.dp[idx]=sqrt(op1.dp[idx]);
tally[idx]++;
} /* end for */
}else{
const double mss_val_dbl=*mss_val.dp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.dp[idx] != mss_val_dbl){
op2.dp[idx]=sqrt(op1.dp[idx]);
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_INT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.ip[idx]=(nco_int)sqrt((double)(op1.ip[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_int mss_val_ntg=*mss_val.ip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.ip[idx] != mss_val_ntg){
op2.ip[idx]=(nco_int)sqrt((double)(op1.ip[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_SHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.sp[idx]=(nco_short)sqrt((double)(op1.sp[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_short mss_val_short=*mss_val.sp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.sp[idx] != mss_val_short){
op2.sp[idx]=(nco_short)sqrt((double)(op1.sp[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_USHORT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.usp[idx]=(nco_ushort)sqrt((double)(op1.usp[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_ushort mss_val_ushort=*mss_val.usp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.usp[idx] != mss_val_ushort){
op2.usp[idx]=(nco_ushort)sqrt((double)(op1.usp[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_UINT:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.uip[idx]=(nco_uint)sqrt((double)(op1.uip[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_uint mss_val_uint=*mss_val.uip;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.uip[idx] != mss_val_uint){
op2.uip[idx]=(nco_uint)sqrt((double)(op1.uip[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_INT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.i64p[idx]=(nco_int64)sqrt((double)(op1.i64p[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_int64 mss_val_int64=*mss_val.i64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.i64p[idx] != mss_val_int64){
op2.i64p[idx]=(nco_int64)sqrt((double)(op1.i64p[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_UINT64:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.ui64p[idx]=(nco_uint64)sqrt((double)(op1.ui64p[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_uint64 mss_val_uint64=*mss_val.ui64p;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.ui64p[idx] != mss_val_uint64){
op2.ui64p[idx]=(nco_uint64)sqrt((double)(op1.ui64p[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_BYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.bp[idx]=(nco_byte)sqrt((double)(op1.bp[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_byte mss_val_byte=*mss_val.bp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.bp[idx] != mss_val_byte){
op2.bp[idx]=(nco_byte)sqrt((double)(op1.bp[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_UBYTE:
if(!has_mss_val){
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
op2.ubp[idx]=(nco_ubyte)sqrt((double)(op1.ubp[idx]));
tally[idx]++;
} /* end for */
}else{
const nco_ubyte mss_val_ubyte=*mss_val.ubp;
#if ( __GNUC__ >= 8 ) || ( __clang_major__ >= 8 )
# pragma omp simd
#endif
for(idx=0;idx<sz;idx++){
if(op1.ubp[idx] != mss_val_ubyte){
op2.ubp[idx]=(nco_ubyte)sqrt((double)(op1.ubp[idx]));
tally[idx]++;
} /* end if */
} /* end for */
} /* end else */
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
} /* end nco_var_sqrt() */
void
nco_var_zero /* [fnc] Zero value of first operand */
(const nc_type type, /* I [enm] netCDF type of operand */
const long sz, /* I [nbr] Size (in elements) of operand */
ptr_unn op1) /* O [val] Values of first operand zeroed on output */
{
/* Purpose: Zero value of first operand */
/* NB: Floats and integers all use same bit pattern for zero
Confirm this with
ccc --tst=bnr --int_foo=0
ccc --dbg=0 --tst=gsl --gsl_a=0.0
Hence, it is faster to use memset() rather than explicit loop to zero memory
calloc() would also work if interactions with NC_CHAR and NC_STRING were predictable
Same approach is used in nco_zero_long()
20050321: memset() replaced the old explicit per-type loops, which were
presumed slower because of pointer de-referencing; the dead "#if 0" copy
of that superseded code has been removed */
size_t sz_byt; /* [B] Number of bytes in variable buffer */
sz_byt=(size_t)sz*nco_typ_lng(type);
switch(type){
/* All numeric types share one memset(): all-zero bytes represent numeric zero */
case NC_FLOAT:
case NC_DOUBLE:
case NC_INT:
case NC_SHORT:
case NC_USHORT:
case NC_UINT:
case NC_INT64:
case NC_UINT64:
case NC_BYTE:
case NC_UBYTE:
(void)memset(op1.vp,0,sz_byt);
break;
case NC_CHAR: break; /* Do nothing */
case NC_STRING: break; /* Do nothing */
default: nco_dfl_case_nc_type_err(); break;
} /* end switch */
/* NB: it is not neccessary to un-typecast pointers to values after access
because we have only operated on local copies of them. */
} /* end nco_var_zero() */
|
mttkrp.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "mttkrp.h"
#include "thd_info.h"
#include "tile.h"
#include "util.h"
#include "mutex_pool.h"
#include <time.h>
/* Statically-allocated output-matrix header used by p_schedule_tiles() as the
 thread-private mats_priv[MAX_NMODES] entry, in place of a per-call
 splatt_malloc() */
matrix_t p_schedule_tiles_malloc1;
/* Statically-initialized lock pools; locks arrays start NULL.
 NOTE(review): population/use of these pools is not visible in this file
 portion -- confirm they are initialized before first use */
mutex_pool mutex_pool1 = { .num_locks = SPLATT_DEFAULT_NLOCKS, .pad_size = SPLATT_DEFAULT_LOCK_PAD, .locks = NULL};
mutex_pool mutex_pool2 = { .num_locks = SPLATT_DEFAULT_NLOCKS, .pad_size = SPLATT_DEFAULT_LOCK_PAD, .locks = NULL};
/* XXX: this is a memory leak until cpd_ws is added/freed. */
static mutex_pool * pool = NULL;
/**
* @brief Function pointer that performs MTTKRP on a tile of a CSF tree.
*
* @param ct The CSF tensor.
* @param tile_id The tile to process.
* @param mats The matrices.
* @param mode The output mode.
* @param thds Thread structures.
* @param partition A partitioning of the slices in the tensor, to distribute
* to threads. Use the thread ID to decide which slices to
* process. This may be NULL, in that case simply process all
* slices.
*/
/* Both the atomic and non-synchronized kernels passed to p_schedule_tiles()
 (its atomic_func/nosync_func parameters) conform to this signature. */
typedef void (* csf_mttkrp_func)(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition);
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Perform a reduction on thread-local MTTKRP outputs.
*
* @param ws MTTKRP workspace containing thread-local outputs.
* @param global_output The global MTTKRP output we are reducing into.
* @param nrows The number of rows in the MTTKRP.
* @param ncols The number of columns in the MTTKRP.
*/
/**
 * @brief Sum every thread-local MTTKRP buffer into the global output,
 *        timing the reduction and recording it in the workspace.
 *
 * This build is serial: the single "thread" (id 0 of 1) covers the whole
 * nrows x ncols output range. The commented-out pragmas mark where the
 * OpenMP barrier/master synchronization lived.
 *
 * @param ws            Workspace holding the per-thread privatize_buffer
 *                      arrays; ws->reduction_time receives the elapsed time.
 * @param global_output Dense output buffer the partial results are added into.
 * @param nrows         Number of rows in the MTTKRP output.
 * @param ncols         Number of columns in the MTTKRP output.
 */
static void p_reduce_privatized(
    splatt_mttkrp_ws * const ws,
    val_t * const restrict global_output,
    idx_t const nrows,
    idx_t const ncols)
{
  /* Ensure everyone has completed their local MTTKRP. */
  // #pragma omp barrier

  sp_timer_t tmr;
  timer_fstart(&tmr);

  int const thread_id = 0; //splatt_omp_get_thread_num();
  idx_t const nthreads = 1; //splatt_omp_get_num_threads();

  /* Partition the flattened output among threads; the last thread absorbs
   * any remainder. */
  idx_t const total_elems = nrows * ncols;
  idx_t const chunk = total_elems / nthreads;
  idx_t const first = thread_id * chunk;
  idx_t last;
  if((idx_t)thread_id == nthreads - 1) {
    last = total_elems;
  } else {
    last = (thread_id + 1) * chunk;
  }

  /* Accumulate each thread's private buffer into the shared output. */
  for(idx_t t=0; t < nthreads; ++t) {
    val_t const * const restrict buf = ws->privatize_buffer[t];
    for(idx_t x=first; x < last; ++x) {
      global_output[x] += buf[x];
    }
  }

  timer_stop(&tmr);
  // #pragma omp master
  ws->reduction_time = tmr.seconds;
}
/**
* @brief Map MTTKRP functions onto a (possibly tiled) CSF tensor. This function
* will handle any scheduling required with a partially tiled tensor.
*
* @param tensors An array of CSF representations. tensors[csf_id] is processed.
* @param csf_id Which tensor are we processing?
* @param atomic_func An MTTKRP function which atomically updates the output.
* @param nosync_func An MTTKRP function which does not atomically update.
* @param mats The matrices, with the output stored in mats[MAX_NMODES].
* @param mode Which mode of 'tensors' is the output (not CSF depth).
* @param thds Thread structures.
* @param ws MTTKRP workspace.
*/
/*
 * Run the MTTKRP kernels over one CSF representation, handling tiled and
 * untiled tensors. When the output mode is privatized, the (thread-private)
 * output pointer is redirected into ws->privatize_buffer and a reduction
 * follows; otherwise the atomic kernel is used wherever trees could collide
 * on output rows.
 *
 * NOTE(review): this is a serial port -- the OpenMP pragmas are commented
 * out and tid / thread count are hard-coded to a single thread.
 */
static void p_schedule_tiles(
    splatt_csf const * const tensors,
    idx_t const csf_id,
    csf_mttkrp_func atomic_func,
    csf_mttkrp_func nosync_func,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    splatt_mttkrp_ws * const ws)
{
  splatt_csf const * const csf = &(tensors[csf_id]);
  idx_t const nmodes = csf->nmodes;
  idx_t const depth = nmodes - 1;

  idx_t const nrows = mats[mode]->I;
  idx_t const ncols = mats[mode]->J;

  /* Store old pointer -- restored after the (possibly privatized) run. */
  val_t * const restrict global_output = mats[MAX_NMODES]->vals;

  // #pragma omp parallel
  {
    int const tid = 0; //splatt_omp_get_thread_num();
    timer_start(&thds[tid].ttime);

    idx_t const * const tile_partition = ws->tile_partition[csf_id];
    idx_t const * const tree_partition = ws->tree_partition[csf_id];

    /*
     * We may need to edit mats[MAX_NMODES]->vals, so create a private copy of
     * the pointers to edit. (NOT actual factors).
     */
    matrix_t * mats_priv[MAX_NMODES+1];
    for(idx_t m=0; m < MAX_NMODES; ++m) {
      mats_priv[m] = mats[m];
    }
    /* each thread gets separate structure, but do a shallow copy */
    /* NOTE(review): uses a file-scope matrix_t instead of splatt_malloc();
     * this only works single-threaded -- confirm before re-enabling OpenMP */
    mats_priv[MAX_NMODES] = &p_schedule_tiles_malloc1;//splatt_malloc(sizeof(**mats_priv));
    *(mats_priv[MAX_NMODES]) = *(mats[MAX_NMODES]);

    /* Give each thread its own private buffer and overwrite atomic
     * function. */
    if(ws->is_privatized[mode]) {
      /* change (thread-private!) output structure */
      memset(ws->privatize_buffer[tid], 0,
          nrows * ncols * sizeof(**(ws->privatize_buffer)));
      mats_priv[MAX_NMODES]->vals = ws->privatize_buffer[tid];

      /* Don't use atomics if we privatized. */
      atomic_func = nosync_func;
    }

    /*
     * Distribute tiles to threads in some fashion.
     */
    if(csf->ntiles > 1) {
      /* We parallelize across tiles, and thus should not distribute within a
       * tree. This may change if we instead 'split' tiles across a few
       * threads. */
      assert(tree_partition == NULL);

      /* mode is actually tiled -- avoid synchronization */
      if(csf->tile_dims[mode] > 1) {
        idx_t tile_id = 0;

        /* foreach layer of tiles */
        // #pragma omp for schedule(dynamic, 1) nowait
        for(idx_t t=0; t < csf->tile_dims[mode]; ++t) {
          tile_id =
              get_next_tileid(TILE_BEGIN, csf->tile_dims, nmodes, mode, t);
          while(tile_id != TILE_END) {
            nosync_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
            tile_id =
                get_next_tileid(tile_id, csf->tile_dims, nmodes, mode, t);
          }
        }

      /* tiled, but not this mode. Atomics are still necessary. */
      } else {
        for(idx_t tile_id = tile_partition[tid];
            tile_id < tile_partition[tid+1]; ++tile_id) {
          atomic_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
        }
      }

    /*
     * Untiled, parallelize within kernel.
     */
    } else {
      assert(tree_partition != NULL);
      atomic_func(csf, 0, mats_priv, mode, thds, tree_partition);
    }

    timer_stop(&thds[tid].ttime);

    /* If we used privatization, perform a reduction. */
    if(ws->is_privatized[mode]) {
      p_reduce_privatized(ws, global_output, nrows, ncols);
    }
    //free(mats_priv[MAX_NMODES]);
  } /* end omp parallel */

  /* restore pointer */
  mats[MAX_NMODES]->vals = global_output;
}
/**
* @brief Should a certain mode should be privatized to avoid locks?
*
* @param csf The tensor (just used for dimensions).
* @param mode The mode we are processing.
* @param opts Options, storing the # threads and the threshold.
*
* @return true, if we should privatize.
*/
/**
 * @brief Should a certain mode be privatized to avoid locks?
 *
 * @param csf The tensor (just used for dimensions and nnz).
 * @param mode The mode we are processing.
 * @param opts Options, storing the # threads and the threshold.
 *
 * @return true, if we should privatize.
 */
static bool p_is_privatized(
    splatt_csf const * const csf,
    idx_t const mode,
    double const * const opts)
{
  idx_t const length = csf->dims[mode];
  idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
  double const thresh = opts[SPLATT_OPTION_PRIVTHRESH];

  /* don't bother if it is not multithreaded. */
  if(nthreads == 1) {
    return false;
  }

  /* Convert BEFORE multiplying: 'length * nthreads' evaluated in idx_t can
   * wrap for very large modes, which would silently disable privatization.
   * Doing the product in double avoids the integer overflow. */
  return ((double) length * (double) nthreads) <= (thresh * (double) csf->nnz);
}
/*
 * Accumulate the elementwise (Hadamard) product of 'a' and 'b' into 'out',
 * zeroing 'a' along the way so it is ready for the next sibling subtree.
 */
static inline void p_add_hada_clear(
    val_t * const restrict out,
    val_t * const restrict a,
    val_t const * const restrict b,
    idx_t const nfactors)
{
  idx_t col = 0;
  while(col < nfactors) {
    out[col] += a[col] * b[col];
    a[col] = 0;
    ++col;
  }
}
/*
 * Overwrite 'out' with the elementwise (Hadamard) product of 'a' and 'b'.
 * The restrict qualifiers guarantee no aliasing, so iteration order is free.
 */
static inline void p_assign_hada(
    val_t * const restrict out,
    val_t const * const restrict a,
    val_t const * const restrict b,
    idx_t const nfactors)
{
  idx_t remaining = nfactors;
  while(remaining > 0) {
    --remaining;
    out[remaining] = a[remaining] * b[remaining];
  }
}
/*
 * Scatter each nonzero in [start, end) into its leaf-matrix row:
 *   leafmat_row(inds[n]) += vals[n] * accumbuf.
 * Per-row locking is compiled out in this serial port (see the commented
 * mutex calls).
 */
static inline void p_csf_process_fiber_locked(
    val_t * const leafmat,
    val_t const * const restrict accumbuf,
    idx_t const nfactors,
    idx_t const start,
    idx_t const end,
    idx_t const * const restrict inds,
    val_t const * const restrict vals)
{
  for(idx_t nz=start; nz < end; ++nz) {
    val_t const weight = vals[nz];
    val_t * const restrict outrow = leafmat + (inds[nz] * nfactors);
    //mutex_set_lock(pool, inds[nz]);
    for(idx_t col=0; col < nfactors; ++col) {
      outrow[col] += weight * accumbuf[col];
    }
    //mutex_unset_lock(pool, inds[nz]);
  }
}
/*
 * Scatter each nonzero in [start, end) into its leaf-matrix row without
 * any synchronization: leafmat_row(inds[n]) += vals[n] * accumbuf.
 */
static inline void p_csf_process_fiber_nolock(
    val_t * const leafmat,
    val_t const * const restrict accumbuf,
    idx_t const nfactors,
    idx_t const start,
    idx_t const end,
    idx_t const * const restrict inds,
    val_t const * const restrict vals)
{
  for(idx_t nz=start; nz < end; ++nz) {
    val_t const scale = vals[nz];
    val_t * const restrict outrow = leafmat + (inds[nz] * nfactors);
    for(idx_t col=0; col < nfactors; ++col) {
      outrow[col] += scale * accumbuf[col];
    }
  }
}
/*
 * Gather step: accumulate into 'accumbuf' the value-weighted sum of the
 * leaf-matrix rows touched by the nonzeros in [start, end):
 *   accumbuf += sum_n vals[n] * leafmat_row(inds[n]).
 */
static inline void p_csf_process_fiber(
    val_t * const restrict accumbuf,
    idx_t const nfactors,
    val_t const * const leafmat,
    idx_t const start,
    idx_t const end,
    idx_t const * const inds,
    val_t const * const vals)
{
  /* foreach nnz in fiber */
  for(idx_t nz=start; nz < end; ++nz) {
    val_t const scale = vals[nz];
    val_t const * const restrict srcrow = leafmat + (nfactors * inds[nz]);
    for(idx_t col=0; col < nfactors; ++col) {
      accumbuf[col] += scale * srcrow[col];
    }
  }
}
/*
 * Depth-first traversal of the subtree rooted at node 'init_idx' (depth
 * 'init_depth'), computing its MTTKRP contribution into 'out'.
 * buf[d] holds the partial accumulation row for depth d and idxstack[d]
 * tracks the current node at depth d; both are caller-provided scratch.
 */
static inline void p_propagate_up(
    val_t * const out,
    val_t * const * const buf,
    idx_t * const restrict idxstack,
    idx_t const init_depth,
    idx_t const init_idx,
    idx_t const * const * const fp,
    idx_t const * const * const fids,
    val_t const * const restrict vals,
    val_t ** mvals,
    idx_t const nmodes,
    idx_t const nfactors)
{
  /* push initial idx and initialize idxstack down to the leaves */
  idxstack[init_depth] = init_idx;
  for(idx_t m=init_depth+1; m < nmodes; ++m) {
    idxstack[m] = fp[m-1][idxstack[m-1]];
  }

  assert(init_depth < nmodes-1);

  /* clear out accumulation buffer */
  for(idx_t f=0; f < nfactors; ++f) {
    buf[init_depth+1][f] = 0;
  }

  /* loop until every child fiber of init_idx has been consumed */
  while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
    /* skip to last internal mode */
    idx_t depth = nmodes - 2;

    /* process all nonzeros [start, end) into buf[depth] */
    idx_t const start = fp[depth][idxstack[depth]];
    idx_t const end = fp[depth][idxstack[depth]+1];
    p_csf_process_fiber(buf[depth+1], nfactors, mvals[depth+1],
        start, end, fids[depth+1], vals);
    idxstack[depth+1] = end;

    /* exit early if there is no propagation to do... */
    if(init_depth == nmodes-2) {
      for(idx_t f=0; f < nfactors; ++f) {
        out[f] = buf[depth+1][f];
      }
      return;
    }

    /* Propagate up until we reach a node with more children to process */
    do {
      /* propagate result up and clear buffer for next sibling */
      val_t const * const restrict fibrow
          = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
      p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);

      ++idxstack[depth];
      --depth;
    } while(depth > init_depth &&
        idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
  } /* end DFS */

  /* copy to out */
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = buf[init_depth+1][f];
  }
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the root mode,
 * without locking: each slice (tree) owns exactly one output row.
 * Per fiber: accumF = sum over nnz of val * B-row, then
 * writeF += accumF .* A-row; writeF is flushed into the output row once
 * per slice and re-zeroed.
 */
static void p_csf_mttkrp_root3_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* A: depth-1 factor, B: depth-2 (leaf) factor, ovals: output matrix */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* sids == NULL means slice ids are the identity mapping */
    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    } /* foreach fiber */

    /* flush to output and reset writeF for the next slice */
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
  } /* foreach slice (tree) */
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the root mode, for
 * the case where output rows can be shared (e.g. across tiles). The
 * per-row mutexes are compiled out in this serial port (commented calls).
 * Same computation as the nolock variant; only the output flush differs.
 */
static void p_csf_mttkrp_root3_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    }

    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* flush to output (row lock disabled in serial port) */
    //mutex_set_lock(pool, fid);
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
    // mutex_unset_lock(pool, fid);
  }
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the internal
 * (depth-1) mode. Per fiber: accumF = sum over nnz of val * B-row, then
 * the fiber's output row receives rootrow .* accumF. Per-row mutexes are
 * compiled out in this serial port (commented calls).
 */
static void p_csf_mttkrp_intl3_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* A: root (depth-0) factor, B: leaf (depth-2) factor */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* write to fiber row (row lock disabled in serial port) */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      // mutex_set_lock(pool, fids[f]);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
      // mutex_unset_lock(pool, fids[f]);
    }
  }
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the leaf (depth-2)
 * mode. Per fiber: accumF = rootrow .* fiber-row, then every nonzero
 * scatters val * accumF into its output row. Per-row mutexes are compiled
 * out in this serial port (commented calls).
 */
static void p_csf_mttkrp_leaf3_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* A: root (depth-0) factor, B: depth-1 factor */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }

      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        // mutex_set_lock(pool, inds[jj]);
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
        // mutex_unset_lock(pool, inds[jj]);
      }
    }
  }
}
/*
 * General N-mode MTTKRP where the OUTPUT is the root mode, without
 * locking. Dispatches to the specialized 3-mode kernel when possible;
 * otherwise each slice's subtree contribution is computed with
 * p_propagate_up and added into that slice's output row.
 */
static void p_csf_mttkrp_root_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_root3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows and scratch accumulation rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  /* break up loop by partition (NULL partition => process all fibers) */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    /* fids[0] == NULL means slice ids are the identity mapping */
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    assert(fid < mats[MAX_NMODES]->I);

    /* compute this subtree's contribution into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    // mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    // mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}
/*
 * General N-mode MTTKRP where the OUTPUT is the root mode, for the case
 * where output rows can be shared. The per-row mutexes are compiled out
 * in this serial port (commented calls); the computation otherwise
 * matches p_csf_mttkrp_root_nolock.
 */
static void p_csf_mttkrp_root_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_root3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows and scratch accumulation rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  /* break up loop by partition (NULL partition => process all fibers) */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    assert(fid < mats[MAX_NMODES]->I);

    /* compute this subtree's contribution into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);

    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    // mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    // mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the leaf (depth-2)
 * mode, without locking. Per fiber: accumF = rootrow .* fiber-row, then
 * every nonzero scatters val * accumF into its output row.
 */
static void p_csf_mttkrp_leaf3_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* A: root (depth-0) factor, B: depth-1 factor */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }

      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
      }
    }
  }
}
/*
 * General N-mode MTTKRP where the OUTPUT is the leaf mode, without
 * locking. A DFS walks each tree: Hadamard products are propagated DOWN
 * the internal levels (buf[d+1] = buf[d] .* row), and at the last
 * internal level each fiber's nonzeros scatter into the output matrix.
 */
static void p_csf_mttkrp_leaf_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition)
{
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  /* extract tensor structures */
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows and scratch Hadamard rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice */
  idx_t const nouter = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nouter;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;

    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propagate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_nolock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
/*
 * General N-mode MTTKRP where the OUTPUT is the leaf mode, for the case
 * where output rows can be shared. Identical traversal to the nolock
 * variant, but scattering goes through p_csf_process_fiber_locked (whose
 * per-row mutexes are compiled out in this serial port).
 */
static void p_csf_mttkrp_leaf_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const restrict partition)
{
  /* extract tensor structures */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* per-depth factor rows and scratch Hadamard rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;

    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propagate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_locked(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
/*
 * MTTKRP over a 3-mode CSF tensor where the OUTPUT is the internal
 * (depth-1) mode, without locking. Per fiber: accumF = sum over nnz of
 * val * B-row, then the fiber's output row receives rootrow .* accumF.
 */
static void p_csf_mttkrp_intl3_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;

  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];

  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  /* A: root (depth-0) factor, B: leaf (depth-2) factor */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* break up loop by partition (NULL partition => process all slices) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* write to fiber row */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
    }
  }
}
/*
 * General N-mode MTTKRP where the OUTPUT is an internal mode (depth
 * 'outdepth'), without locking. The DFS propagates Hadamard products
 * DOWN to outdepth-1, pulls the subtree contribution UP into
 * buf[outdepth] via p_propagate_up, and combines both at the output row.
 */
static void p_csf_mttkrp_intl_nolock(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_intl3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);

  /* per-depth factor rows and scratch accumulation rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/*
 * General N-mode MTTKRP where the OUTPUT is an internal mode, for the
 * case where output rows can be shared. Identical traversal to the
 * nolock variant; the per-row mutexes around the output update are
 * compiled out in this serial port (commented calls).
 */
static void p_csf_mttkrp_intl_locked(
    splatt_csf const * const ct,
    idx_t const tile_id,
    matrix_t ** mats,
    idx_t const mode,
    thd_info * const thds,
    idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_intl3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }

  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);

  /* per-depth factor rows and scratch accumulation rows */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = 0; // splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;

  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      // mutex_set_lock(pool, noderow);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
      // mutex_unset_lock(pool, noderow);

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/*
 * Compute the MTTKRP for one mode of a CSF tensor: clears the output matrix
 * mats[MAX_NMODES], then dispatches to the root / leaf / internal kernel
 * depending on where `mode` sits in the chosen CSF tensor's mode ordering.
 *
 * tensors : array of CSF representations (one or more allocations).
 * mats    : factor matrices; mats[MAX_NMODES] receives the result.
 * mode    : mode being updated.
 * thds    : per-thread scratch (serial build uses thread 0 only).
 * ws      : workspace holding the mode->CSF map and partitions.
 * opts    : SPLATT options array (only used by the commented-out timing code).
 */
void mttkrp_csf(
  splatt_csf const * const tensors,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  splatt_mttkrp_ws * const ws,
  double const * const opts)
{
  /* ensure we use as many threads as our partitioning supports */
  //splatt_omp_set_num_threads(ws->num_threads);

  /* lazily grab the shared mutex pool used by the locked kernels */
  if(pool == NULL) {
    pool = &mutex_pool1; //mutex_alloc();
  }

  /* debug output (typo "nmdoes" fixed) */
  printf("Max nmodes: %d\n", MAX_NMODES);

  /* clear output matrix */
  matrix_t * const M = mats[MAX_NMODES];
  M->I = tensors[0].dims[mode];
  memset(M->vals, 0, M->I * M->J * sizeof(val_t));

  idx_t const nmodes = tensors[0].nmodes;

  /* reset thread times */
  //thd_reset(thds, 1) ; //splatt_omp_get_max_threads());

  /* choose which MTTKRP function to use: the kernel depends on the depth of
   * `mode` inside the CSF tensor mapped to it */
  idx_t const which_csf = ws->mode_csf_map[mode];
  idx_t const outdepth = csf_mode_to_depth(&(tensors[which_csf]), mode);
  if(outdepth == 0) {
    /* root */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_root_locked, p_csf_mttkrp_root_nolock,
        mats, mode, thds, ws);
  } else if(outdepth == nmodes - 1) {
    /* leaf */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_leaf_locked, p_csf_mttkrp_leaf_nolock,
        mats, mode, thds, ws);
  } else {
    /* internal */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_intl_locked, p_csf_mttkrp_intl_nolock,
        mats, mode, thds, ws);
  }

  /*
  // print thread times, if requested
  if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    printf("MTTKRP mode %"SPLATT_PF_IDX": ", mode+1);
    thd_time_stats(thds, 1 splatt_omp_get_max_threads());
    if(ws->is_privatized[mode]) {
      printf("  reduction-time: %0.3fs\n", ws->reduction_time);
    }
  }
  thd_reset(thds, splatt_omp_get_max_threads());
  */
}
/******************************************************************************
* DEPRECATED FUNCTIONS
*****************************************************************************/
/******************************************************************************
* SPLATT MTTKRP
*****************************************************************************/
/*
 * SPLATT fiber-based MTTKRP on an ftensor_t.  Writes the result into
 * mats[MAX_NMODES] (cleared here first).  Tiled tensors are forwarded to the
 * specialized sync/coop kernels.  A and B are the two non-output factors,
 * selected through ft->dim_perm.  Serial in this build: the OpenMP pragmas
 * are commented out and only thread 0's scratch is used.
 */
void mttkrp_splatt(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  /* tiled layouts have their own kernels */
  if(ft->tiled == SPLATT_SYNCTILE) {
    mttkrp_splatt_sync_tiled(ft, mats, mode, thds, nthreads);
    return;
  }
  if(ft->tiled == SPLATT_COOPTILE) {
    mttkrp_splatt_coop_tiled(ft, mats, mode, thds, nthreads);
    return;
  }

  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslices = ft->dims[mode];
  idx_t const rank = M->J;

  /* output rows are accumulated in place; start from zero */
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  /* CSR-like fiber structure: slices -> fibers -> nonzeros */
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  // #pragma omp parallel
  {
    int const tid = 0; //splatt_omp_get_thread_num();
    /* accumF holds the rank-length partial inner product for one fiber */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    // #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      val_t * const restrict mv = mvals + (s * rank);
      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }
        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }
        /* scale inner products by row of A and update to M */
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/*
 * MTTKRP over a sync-tiled ftensor_t.  Same fiber accumulation as
 * mttkrp_splatt, but iteration is over slabs (slabptr) and the destination
 * row of each fiber comes from sids[f] rather than the slice index.
 * Serial in this build (OpenMP pragmas commented out).
 */
void mttkrp_splatt_sync_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  /* slab -> fiber -> nonzero structure; sids maps fiber to output row */
  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  // #pragma omp parallel
  {
    int const tid = 0; //splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);

    // #pragma omp for schedule(dynamic, 1) nowait
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slice */
      for(idx_t f=slabptr[s]; f < slabptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }
        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }
        /* scale inner products by row of A and update to M */
        val_t * const restrict mv = mvals + (sids[f] * rank);
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/*
 * MTTKRP over a coop-tiled ftensor_t.  Each slab is first accumulated into a
 * thread-local buffer (thds[t].scratch[1], indexed by slice % TILE_SIZES[0]),
 * then all threads' local buffers for that slab are reduced into the global
 * output and zeroed for reuse.  Serial in this build (pragmas commented out),
 * but the reduction still walks all `nthreads` scratch buffers.
 */
void mttkrp_splatt_coop_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;

  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));

  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;

  /* slab -> slice-within-slab -> fiber -> nonzero structure */
  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;

  // #pragma omp parallel
  {
    int const tid = 0; // splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    /* per-thread staging area for one slab's worth of output rows */
    val_t * const localm = (val_t *) thds[tid].scratch[1];
    timer_start(&thds[tid].ttime);

    /* foreach slab */
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slab */
      // #pragma omp for schedule(dynamic, 8)
      for(idx_t sl=slabptr[s]; sl < slabptr[s+1]; ++sl) {
        idx_t const slice = sids[sl];
        for(idx_t f=sptr[sl]; f < sptr[sl+1]; ++f) {
          /* first entry of the fiber is used to initialize accumF */
          idx_t const jjfirst = fptr[f];
          val_t const vfirst = vals[jjfirst];
          val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] = vfirst * bv[r];
          }
          /* foreach nnz in fiber */
          for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
            val_t const v = vals[jj];
            val_t const * const restrict bv = bvals + (inds[jj] * rank);
            for(idx_t r=0; r < rank; ++r) {
              accumF[r] += v * bv[r];
            }
          }
          /* scale inner products by row of A and update thread-local M */
          val_t * const restrict mv = localm + ((slice % TILE_SIZES[0]) * rank);
          val_t const * const restrict av = avals + (fids[f] * rank);
          for(idx_t r=0; r < rank; ++r) {
            mv[r] += accumF[r] * av[r];
          }
        }
      }

      /* reduce every thread's local buffer into the global output rows
       * covered by this slab, clearing the buffers for the next slab */
      idx_t const start = s * TILE_SIZES[0];
      idx_t const stop = SS_MIN((s+1) * TILE_SIZES[0], ft->dims[mode]);
      // #pragma omp for schedule(static)
      for(idx_t i=start; i < stop; ++i) {
        /* map i back to global slice id */
        idx_t const localrow = i % TILE_SIZES[0];
        for(idx_t t=0; t < nthreads; ++t) {
          val_t * const threadm = (val_t *) thds[t].scratch[1];
          for(idx_t r=0; r < rank; ++r) {
            mvals[r + (i*rank)] += threadm[r + (localrow*rank)];
            threadm[r + (localrow*rank)] = 0.;
          }
        }
      }
    } /* end foreach slab */
    timer_stop(&thds[tid].ttime);
  } /* end omp parallel */
}
/******************************************************************************
* GIGA MTTKRP
*****************************************************************************/
/*
 * GigaTensor-style MTTKRP on an unfolded sparse matrix (CSR).  For each rank
 * column r: stage vals[y] * A[a][r] * B[b][r] per nonzero into `scratch`,
 * then row-sum scratch into column r of the output.  Column indices of the
 * unfolding are decoded as a = colind/B->I, b = colind%B->I.  Factors are
 * accessed column-major here (offset r * nrows), unlike the row-major
 * kernels above.  `scratch` must hold at least nnz values.
 */
void mttkrp_giga(
  spmatrix_t const * const spmat,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  /* A and B are the two factors other than `mode` (3-mode layout) */
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];
  idx_t const I = spmat->I;
  idx_t const rank = M->J;

  idx_t const * const restrict rowptr = spmat->rowptr;
  idx_t const * const restrict colind = spmat->colind;
  val_t const * const restrict vals = spmat->vals;

  // #pragma omp parallel
  {
    for(idx_t r=0; r < rank; ++r) {
      val_t * const restrict mv = M->vals + (r * I);
      val_t const * const restrict av = A->vals + (r * A->I);
      val_t const * const restrict bv = B->vals + (r * B->I);

      /* Joined Hadamard products of X, C, and B */
      // #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          idx_t const a = colind[y] / B->I;
          idx_t const b = colind[y] % B->I;
          scratch[y] = vals[y] * av[a] * bv[b];
        }
      }

      /* now accumulate rows into column of M1 */
      // #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        val_t sum = 0;
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          sum += scratch[y];
        }
        mv[i] = sum;
      }
    }
  }
}
/******************************************************************************
* TTBOX MTTKRP
*****************************************************************************/
/*
 * Tensor-Toolbox-style MTTKRP on a coordinate (sptensor_t) tensor.  For each
 * rank column r: compute vals[x] * A[indA[x]][r] * B[indB[x]][r] per nonzero
 * into `scratch`, then scatter-add scratch into the output rows given by the
 * mode's indices.  Output and factors are accessed column-major (offset
 * r * nrows).  `scratch` must hold at least nnz values.  3-mode layout.
 */
void mttkrp_ttbox(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  /* the two non-output factors for a 3-mode tensor */
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];
  idx_t const I = tt->dims[mode];
  idx_t const rank = M->J;

  memset(M->vals, 0, I * rank * sizeof(val_t));

  idx_t const nnz = tt->nnz;
  idx_t const * const restrict indM = tt->ind[mode];
  idx_t const * const restrict indA =
    mode == 0 ? tt->ind[1] : tt->ind[0];
  idx_t const * const restrict indB =
    mode == 2 ? tt->ind[1] : tt->ind[2];
  val_t const * const restrict vals = tt->vals;

  for(idx_t r=0; r < rank; ++r) {
    val_t * const restrict mv = M->vals + (r * I);
    val_t const * const restrict av = A->vals + (r * A->I);
    val_t const * const restrict bv = B->vals + (r * B->I);

    /* stretch out columns of A and B */
    // #pragma omp parallel for
    for(idx_t x=0; x < nnz; ++x) {
      scratch[x] = vals[x] * av[indA[x]] * bv[indB[x]];
    }

    /* now accumulate into m1 */
    for(idx_t x=0; x < nnz; ++x) {
      mv[indM[x]] += scratch[x];
    }
  }
}
/*
 * Streaming MTTKRP over a coordinate tensor: for every nonzero, form the
 * Hadamard product of the corresponding rows of all factors except `mode`,
 * scaled by the nonzero's value, and add it to the output row indexed by the
 * mode's coordinate.  Works for any number of modes.  Serial in this build;
 * the per-row mutex locking for the parallel case is commented out.
 */
void mttkrp_stream(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode)
{
  /* lazily grab the shared mutex pool (used only by the locked path) */
  if(pool == NULL) {
    pool = &mutex_pool2; //mutex_alloc();
  }

  matrix_t * const M = mats[MAX_NMODES];
  idx_t const I = tt->dims[mode];
  idx_t const nfactors = M->J;
  val_t * const outmat = M->vals;
  memset(outmat, 0, I * nfactors * sizeof(*outmat));

  idx_t const nmodes = tt->nmodes;
  val_t * mvals[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[m]->vals;
  }
  val_t const * const restrict vals = tt->vals;

  // #pragma omp parallel
  {
    /* rank-length accumulator, one per (would-be) thread
     * NOTE(review): malloc result is not checked -- a failed allocation
     * would crash on first use; confirm whether callers guard against this */
    val_t * restrict accum = malloc(nfactors * sizeof(*accum));

    /* stream through nnz */
    // #pragma omp for schedule(static)
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* initialize with value */
      for(idx_t f=0; f < nfactors; ++f) {
        accum[f] = vals[n];
      }
      /* multiply in every factor row except the output mode's */
      for(idx_t m=0; m < nmodes; ++m) {
        if(m == mode) {
          continue;
        }
        val_t const * const restrict inrow = mvals[m] + \
            (tt->ind[m][n] * nfactors);
        for(idx_t f=0; f < nfactors; ++f) {
          accum[f] *= inrow[f];
        }
      }
      /* write to output (out_ind only feeds the commented-out locking) */
      idx_t const out_ind = tt->ind[mode][n];
      val_t * const restrict outrow = outmat + (tt->ind[mode][n] * nfactors);
      // mutex_set_lock(pool, out_ind);
      for(idx_t f=0; f < nfactors; ++f) {
        outrow[f] += accum[f];
      }
      // mutex_unset_lock(pool, out_ind);
    }
    free(accum);
  } /* end omp parallel */
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/*
 * Public MTTKRP entry point.  Wraps the caller's raw factor arrays in
 * matrix_t headers, allocates thread scratch and a workspace, runs
 * mttkrp_csf(), then frees everything it allocated.  The matrix_t wrappers
 * do not own the value arrays, so only the headers are freed.
 *
 * mode     : mode to update.
 * ncolumns : rank (columns of every factor).
 * tensors  : CSF representation(s) of the tensor.
 * matrices : input factor values, one array per mode.
 * matout   : output buffer, dims[mode] * ncolumns values.
 * options  : SPLATT options array (thread count, verbosity, ...).
 * Returns SPLATT_SUCCESS.
 */
int splatt_mttkrp(
    splatt_idx_t const mode,
    splatt_idx_t const ncolumns,
    splatt_csf const * const tensors,
    splatt_val_t ** matrices,
    splatt_val_t * const matout,
    double const * const options)
{
  idx_t const nmodes = tensors->nmodes;

  /* fill matrix pointers */
  matrix_t * mats[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) malloc(sizeof(matrix_t));
    mats[m]->I = tensors->dims[m];
    /* bug fix: was `mats[m]->J = ncolumns,` -- a comma operator that merged
     * this statement with the next; semantics were accidental, now explicit */
    mats[m]->J = ncolumns;
    mats[m]->rowmajor = 1;
    mats[m]->vals = matrices[m];
  }
  mats[MAX_NMODES] = (matrix_t *) malloc(sizeof(matrix_t));
  mats[MAX_NMODES]->I = tensors->dims[mode];
  mats[MAX_NMODES]->J = ncolumns;
  mats[MAX_NMODES]->rowmajor = 1;
  mats[MAX_NMODES]->vals = matout;

  /* Setup thread structures. + 64 bytes is to avoid false sharing. */
  idx_t const nthreads = (idx_t) options[SPLATT_OPTION_NTHREADS];
  // splatt_omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
    (nmodes * ncolumns * sizeof(val_t)) + 64,
    0,
    (nmodes * ncolumns * sizeof(val_t)) + 64);

  splatt_mttkrp_ws * ws = splatt_mttkrp_alloc_ws(tensors, ncolumns, options);

  /* do the MTTKRP */
  mttkrp_csf(tensors, mats, mode, thds, ws, options);

  splatt_mttkrp_free_ws(ws);

  /* cleanup: free only the headers -- value arrays belong to the caller */
  thd_free(thds, nthreads);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]);
  }
  free(mats[MAX_NMODES]);

  return SPLATT_SUCCESS;
}
/*
 * Allocate and populate an MTTKRP workspace: maps each mode to one of the
 * CSF allocations (per SPLATT_OPTION_CSF_ALLOC), builds per-CSF tile or tree
 * partitions, and sizes the per-thread privatization buffers to the largest
 * privatized mode.  Returned workspace is released by splatt_mttkrp_free_ws.
 */
splatt_mttkrp_ws * splatt_mttkrp_alloc_ws(
    splatt_csf const * const tensors,
    splatt_idx_t const ncolumns,
    double const * const opts)
{
  splatt_mttkrp_ws * ws = malloc(sizeof(*ws));
  idx_t num_csf = 0;
  //#ifdef _OPENMP
  //  idx_t const num_threads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
  //#else
  idx_t const num_threads = 1;
  //#endif
  ws->num_threads = num_threads;

  /* map each MTTKRP mode to a CSF tensor */
  splatt_csf_type which_csf = (splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC];
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    switch(which_csf) {
    case SPLATT_CSF_ONEMODE:
      /* only one tensor, map is easy */
      ws->mode_csf_map[m] = 0;
      num_csf = 1;
      break;
    case SPLATT_CSF_TWOMODE:
      /* last mode is mapped to second tensor */
      ws->mode_csf_map[m] = 0;
      if(csf_mode_to_depth(&(tensors[0]), m) == tensors->nmodes-1) {
        ws->mode_csf_map[m] = 1;
      }
      num_csf = 2;
      break;
    case SPLATT_CSF_ALLMODE:
      /* each mode has its own tensor, map is easy */
      ws->mode_csf_map[m] = m;
      num_csf = tensors->nmodes;
      break;
    /* XXX */
    default:
      fprintf(stderr, "SPLATT: CSF type '%d' not recognized.\n", which_csf);
      abort();
      break;
    }
  }
  assert(num_csf > 0);
  ws->num_csf = num_csf;

  /* Now setup partition info for each CSF. */
  for(idx_t c=0; c < num_csf; ++c) {
    ws->tile_partition[c] = NULL;
    ws->tree_partition[c] = NULL;
  }
  for(idx_t c=0; c < num_csf; ++c) {
    splatt_csf const * const csf = &(tensors[c]);
    if(tensors[c].ntiles > 1) {
      ws->tile_partition[c] = csf_partition_tiles_1d(csf, num_threads);
    } else {
      ws->tree_partition[c] = csf_partition_1d(csf, 0, num_threads);
    }
  }

  /* allocate privatization buffer, sized for the largest privatized mode */
  idx_t largest_priv_dim = 0;
  ws->privatize_buffer =
      malloc(num_threads * sizeof(*(ws->privatize_buffer)));
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    ws->is_privatized[m] = p_is_privatized(tensors, m, opts);
    if(ws->is_privatized[m]) {
      largest_priv_dim = SS_MAX(largest_priv_dim, tensors->dims[m]);
      if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
        printf("PRIVATIZING-MODE: %"SPLATT_PF_IDX"\n", m+1);
      }
    }
  }
  for(idx_t t=0; t < num_threads; ++t) {
    /* bug fix: these debug printfs passed size_t / idx_t arguments to "%d",
     * which is undefined behavior; use %zu / SPLATT_PF_IDX instead */
    printf("Privatize buffer size: %zu\n",
        (size_t) (largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer))));
    ws->privatize_buffer[t] = malloc(largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer)));
    printf("BUFFERS: %"SPLATT_PF_IDX" x %"SPLATT_PF_IDX"\n",
        largest_priv_dim, ncolumns);
  }

  if(largest_priv_dim > 0 &&
      (int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    size_t bytes = num_threads * largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer));
    char * bstr = bytes_str(bytes);
    printf("PRIVATIZATION-BUF: %s\n", bstr);
    printf("\n");
    free(bstr);
  }

  return ws;
}
/*
 * Release every allocation owned by an MTTKRP workspace -- the per-thread
 * privatization buffers, the per-CSF partitions -- and the workspace itself.
 */
void splatt_mttkrp_free_ws(
    splatt_mttkrp_ws * const ws)
{
  idx_t t;
  idx_t c;

  /* per-thread privatization buffers, then the pointer array itself */
  for(t = 0; t < ws->num_threads; ++t) {
    free(ws->privatize_buffer[t]);
  }
  free(ws->privatize_buffer);

  /* partitions: exactly one of tile/tree is non-NULL per CSF, but free()
   * on NULL is a no-op so both are passed unconditionally */
  for(c = 0; c < ws->num_csf; ++c) {
    free(ws->tile_partition[c]);
    free(ws->tree_partition[c]);
  }

  free(ws);
}
|
vednnMaxPoolingForward.c | #include "vednnMaxPoolingForward.h"
#include "vednn-def.h"
#include <stdint.h>
/*
 * Thread-dispatch wrapper for a max-pooling forward kernel.  Without OpenMP
 * (or with one thread) it calls pFunc directly.  Otherwise each OpenMP
 * thread takes a contiguous chunk of the batch dimension: the first
 * `remain` threads get nBatch+1 images, the rest nBatch, and each thread
 * calls pFunc on copies of the tensor params with `batch` rewritten to its
 * own count and the data pointers offset to its first image.  Error codes
 * are OR-combined across threads.
 */
static inline vednnError_t
vednnMaxPoolingForward_wrapper(
    vednnMaxPoolForward_t pFunc,
    VEDNN_MAXPOOLINGFWD_ARGS )
{
#ifndef VEDNN_USE_OPENMP
  return pFunc(VEDNN_MAXPOOLINGFWD_ARGS_LIST);
#else
  if ( __vednn_omp_num_threads == 1 ) {
    return pFunc(VEDNN_MAXPOOLINGFWD_ARGS_LIST);
  }
  else {
    vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
    {
      int64_t nthreads = omp_get_num_threads() ;
      int64_t threadid = omp_get_thread_num() ;

      /* split the batch: first `remain` threads get one extra image */
      int64_t allBatch = pParamIn->batch ;
      int64_t nBatch = allBatch / nthreads ;
      int64_t remain = allBatch % nthreads ;
      int64_t batchBegin = nBatch * threadid + ( threadid < remain ? threadid : remain ) ;
      int64_t myBatch = nBatch + ( threadid < remain ? 1 : 0 ) ;

      if( myBatch == 0 ) {
        /* more threads than images: nothing to do, contribute success */
        rc |= VEDNN_SUCCESS ;
      }
      else {
        /* per-thread views: same geometry, private batch count/offset */
        vednnTensorParam_t _pParamIn = *pParamIn ; _pParamIn.batch = myBatch ;
        vednnTensorParam_t _pParamOut = *pParamOut ; _pParamOut.batch = myBatch ;
        float* _pDataIn = ((float *)pDataIn) + batchBegin * pParamIn->channel * pParamIn->height * pParamIn->width ;
        float* _pDataOut = ((float *)pDataOut) + batchBegin * pParamOut->channel * pParamOut->height * pParamOut->width ;

        rc |= pFunc(&_pParamIn, (void*)_pDataIn, &_pParamOut, (void*) _pDataOut, pParamPool) ;
      }
    }
    return rc ;
  }
#endif
}
/* ----------------------------------------------------------------------- */
/*
 * Select the fastest max-pooling forward implementation for the given
 * geometry and run it through the OpenMP wrapper.  Specialized kernels
 * require zero padding, stride equal to window, and output dimensions that
 * cover the input; otherwise the general `default` kernel is used.
 */
vednnError_t vednnMaxPoolingForward( VEDNN_MAXPOOLINGFWD_ARGS )
{
#define OMPWRAP( IMPL ) WRAP_RET(vednnMaxPoolingForward_##IMPL, \
    vednnMaxPoolingForward_wrapper, VEDNN_MAXPOOLINGFWD_ARGS_LIST)
  /* NOTE(review): height uses `<=` but width uses `==` below -- looks
   * deliberate (extra input rows can be ignored, extra columns cannot),
   * but confirm against the kernel implementations */
  if( pParamPool->padHeight == 0 && pParamPool->padWidth == 0
      && pParamPool->strideHeight == pParamPool->windowHeight
      && pParamPool->strideWidth == pParamPool->windowWidth
      && pParamOut->height*pParamPool->strideHeight <= pParamIn->height
      && pParamOut->width*pParamPool->strideWidth == pParamIn->width )
  {
    if( pParamOut->width <= 128 )
    {
      /* even window width + 8-byte-aligned input enables the vectorized
       * ww2X variant */
      if( (pParamPool->windowWidth & 0x01) == 0
          && (((uint64_t)pDataIn) & 0x07) == 0 )
        OMPWRAP(regular_ww2X_owU128_ialigned);
      else
        OMPWRAP(regular_owU128);
    }
    else
      OMPWRAP(regular);
  } else
    OMPWRAP(default);
#undef OMPWRAP
}
// vim: et sw=2 ts=2
|
reduce.c | /*
* Main program for openmp and libreduce.
*
* Copyright (c) 2019, Rice University.
* See the file LICENSE for details.
*
* Mark W. Krentel
* August 2019
*/
#include <sys/types.h>
#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define LIBM "libm.so.6"
double reduce(double *, int);
/*
 * Test driver: every OpenMP thread blocks an (empty) signal set and dlopens
 * libm, then the main loop repeatedly fills an array in parallel and reduces
 * it via the external reduce().  N comes from argv[1] (default 1000).
 *
 * Fixes: the per-thread sigset_t and the array A were leaked; both are now
 * freed.  The dlopen handle is intentionally kept open -- loading the
 * library is the point of the test.
 */
int
main(int argc, char **argv)
{
    printf("main: first entry\n");

    int i, j, N;

    /* N from the command line, defaulting to 1000 on absence or parse error */
    if (argc < 2 || sscanf(argv[1], "%d", &N) < 1) {
        N = 1000;
    }
    printf("main: N = %d\n", N);

    double * A = (double *) malloc(N * sizeof(double));
    if (A == NULL) {
        err(1, "malloc array failed");
    }

    printf("main: calling dlopen() and sigprocmask() ...\n");

#pragma omp parallel
    {
        /* block an empty set: a no-op signal-wise, but exercises the call
         * from every thread */
        sigset_t * set = (sigset_t *) malloc(sizeof(sigset_t));
        sigemptyset(set);

        int ret = sigprocmask(SIG_BLOCK, set, NULL);
        if (ret != 0) {
            warn("sigprocmask() failed");
        }
        free(set);

        /* handle deliberately not dlclose()d; the test wants libm mapped */
        void * handle = dlopen(LIBM, RTLD_LAZY);
        if (handle == NULL) {
            warn("dlopen() failed");
        }
    }

    double ans = 0.0;

    for (j = 0; j < N; j++) {
#pragma omp parallel for default(none) private(i) shared(A, N)
        for (i = 0; i < N; i++) {
            A[i] = (double) i;
        }
        ans = reduce(A, N);
    }

    printf("main: ans = %g\n", ans);

    free(A);
    return 0;
}
|
GB_unop__ainv_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fp64_fp64)
// op(A') function: GB (_unop_tran__ainv_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = -Ax [p] for all entries (auto-generated; structure must match the
// Generator template, so only comments are added here).  When Ab is non-NULL
// A is bitmap and only positions with Ab [p] set are written.
GrB_Info GB (_unop_apply__ainv_fp64_fp64)
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every position holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = -(A'): transpose and negate.  The entire body is the shared transpose
// template, which expands using the GB_* macros defined above in this file.
GrB_Info GB (_unop_tran__ainv_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
threading.c | /*BHEADER**********************************************************************
* See the file COPYRIGHT_and_DISCLAIMER for a complete copyright
* notice, contact person, and disclaimer.
*
* $Revision$
*********************************************************************EHEADER*/
#include <stdlib.h>
#include <stdio.h>
#include "utilities.h"
#if defined(HYPRE_USING_OPENMP) || defined (HYPRE_USING_PGCC_SMP)
/*
 * Return the number of worker threads.  Under OpenMP this is queried from
 * inside a parallel region (every thread stores the same value, so the
 * concurrent writes are benign); under PGCC SMP it is hard-coded to 2.
 * The enclosing #if guarantees at least one branch assigns num_threads.
 */
int
hypre_NumThreads( )
{
   int num_threads;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
   num_threads = omp_get_num_threads();
#endif
#ifdef HYPRE_USING_PGCC_SMP
   num_threads = 2;
#endif

   return num_threads;
}
#endif
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
/* The pthreads stuff needs to be reworked */
#define HYPRE_THREAD_GLOBALS
#ifdef HYPRE_USE_PTHREADS
#ifdef HYPRE_USE_UMALLOC
#include "umalloc_local.h"
#endif
int iteration_counter = 0;
volatile int hypre_thread_counter;
volatile int work_continue = 1;
/*
 * Create the shared work queue, spawn `num_threads` worker threads, and
 * initialize the global mutexes used by the pthread work-crew.  Returns the
 * last pthread_create() status (0 on success).
 *
 * Fix: `err` was returned uninitialized when the work-queue allocation
 * failed (no thread was ever created); it is now initialized to 0.
 */
int HYPRE_InitPthreads( int num_threads )
{
   int err = 0;   /* was uninitialized when hypre_qptr == NULL */
   int i;
   hypre_qptr =
        (hypre_workqueue_t) malloc(sizeof(struct hypre_workqueue_struct));

   hypre_NumThreads = num_threads;
   initial_thread = pthread_self();

   if (hypre_qptr != NULL) {
      pthread_mutex_init(&hypre_qptr->lock, NULL);
      pthread_cond_init(&hypre_qptr->work_wait, NULL);
      pthread_cond_init(&hypre_qptr->finish_wait, NULL);
      /* queue starts empty with nobody working or waiting */
      hypre_qptr->n_working = hypre_qptr->n_waiting = hypre_qptr->n_queue = 0;
      hypre_qptr->inp = hypre_qptr->outp = 0;
      for (i=0; i < hypre_NumThreads; i++) {
#ifdef HYPRE_USE_UMALLOC
         /* Get initial area to start heap */
         assert ((_uinitial_block[i] = malloc(INITIAL_HEAP_SIZE))!=NULL);

         /* Create a user heap */
         assert ((_uparam[i].myheap = _ucreate(initial_block[i],
                                      INITIAL_HEAP_SIZE,
                                      _BLOCK_CLEAN,
                                      _HEAP_REGULAR,
                                      _uget_fn,
                                      _urelease_fn)) != NULL);
#endif
         /* worker receives its index via the (historic) pointer cast */
         err=pthread_create(&hypre_thread[i], NULL,
                            (void *(*)(void *))hypre_pthread_worker,
                            (void *)i);
         assert(err == 0);
      }
   }
   pthread_mutex_init(&hypre_mutex_boxloops, NULL);
   pthread_mutex_init(&mpi_mtx, NULL);
   pthread_mutex_init(&talloc_mtx, NULL);
   pthread_mutex_init(&time_mtx, NULL);
   pthread_mutex_init(&worker_mtx, NULL);

   hypre_thread_counter = 0;
   hypre_thread_release = 0;

   return (err);
}
/* Work item that clears the global run flag so worker loops exit after the
 * current item; the argument is unused (signature must match
 * hypre_work_proc_t). */
void hypre_StopWorker(void *i)
{
  work_continue = 0;
}
/*
 * Shut the work-crew down: queue one stop item per worker, join all
 * threads, then destroy the queue's mutex/conds and the global mutexes and
 * free the queue.  Must be called after all real work has drained.
 */
void HYPRE_DestroyPthreads( void )
{
   int i;
   void *status;

   /* one stop item per worker flips work_continue off */
   for (i=0; i < hypre_NumThreads; i++) {
      hypre_work_put(hypre_StopWorker, (void *) &i);
   }

#ifdef HYPRE_USE_UMALLOC
   for (i=0; i<hypre_NumThreads; i++)
   {
      _udestroy (_uparam[i].myheap, _FORCE);
   }
#endif

   /* wait for every worker to exit before tearing down sync objects */
   for (i=0; i<hypre_NumThreads; i++)
      pthread_join(hypre_thread[i], &status);

   pthread_mutex_destroy(&hypre_qptr->lock);
   pthread_mutex_destroy(&hypre_mutex_boxloops);
   pthread_mutex_destroy(&mpi_mtx);
   pthread_mutex_destroy(&talloc_mtx);
   pthread_mutex_destroy(&time_mtx);
   pthread_mutex_destroy(&worker_mtx);
   pthread_cond_destroy(&hypre_qptr->work_wait);
   pthread_cond_destroy(&hypre_qptr->finish_wait);
   free (hypre_qptr);
}
/*
 * Worker main loop.  Protocol: the queue lock is held at the top of each
 * iteration; while the queue is empty the worker sleeps on work_wait
 * (signalling finish_wait when it is the last one to go idle).  It then
 * dequeues one item, releases the lock, runs the item, joins the group
 * barrier, and re-acquires the lock unless a stop item cleared
 * work_continue.  threadid is unused here.
 */
void hypre_pthread_worker( int threadid )
{
   void *argptr;
   hypre_work_proc_t funcptr;

   pthread_mutex_lock(&hypre_qptr->lock);

   hypre_qptr->n_working++;

   while(work_continue) {
      /* sleep until work arrives; last worker to idle wakes any waiter
       * blocked in hypre_work_wait() */
      while (hypre_qptr->n_queue == 0) {
         if (--hypre_qptr->n_working == 0)
            pthread_cond_signal(&hypre_qptr->finish_wait);
         hypre_qptr->n_waiting++;
         pthread_cond_wait(&hypre_qptr->work_wait, &hypre_qptr->lock);
         hypre_qptr->n_waiting--;
         hypre_qptr->n_working++;
      }

      /* pop one item from the circular queue, then run it unlocked */
      hypre_qptr->n_queue--;
      funcptr = hypre_qptr->worker_proc_queue[hypre_qptr->outp];
      argptr = hypre_qptr->argqueue[hypre_qptr->outp];
      hypre_qptr->outp = (hypre_qptr->outp + 1) % MAX_QUEUE;

      pthread_mutex_unlock(&hypre_qptr->lock);

      (*funcptr)(argptr);

      hypre_barrier(&worker_mtx, 0);

      /* a stop item may have cleared work_continue during the barrier */
      if (work_continue)
         pthread_mutex_lock(&hypre_qptr->lock);
   }
}
/*
 * Enqueue one work item (funcptr, argptr) on the shared circular queue,
 * waking one idle worker if any are waiting.  Asserts (rather than blocks)
 * if the queue is full.
 */
void
hypre_work_put( hypre_work_proc_t funcptr, void *argptr )
{
   pthread_mutex_lock(&hypre_qptr->lock);
   if (hypre_qptr->n_waiting) {
      /* idle workers to be awakened */
      pthread_cond_signal(&hypre_qptr->work_wait);
   }
   assert(hypre_qptr->n_queue != MAX_QUEUE);   /* no overflow handling */
   hypre_qptr->n_queue++;
   hypre_qptr->worker_proc_queue[hypre_qptr->inp] = funcptr;
   hypre_qptr->argqueue[hypre_qptr->inp] = argptr;
   hypre_qptr->inp = (hypre_qptr->inp + 1) % MAX_QUEUE;
   pthread_mutex_unlock(&hypre_qptr->lock);
}
/* Wait until all work is done and workers quiesce. */
void
hypre_work_wait( void )
{
pthread_mutex_lock(&hypre_qptr->lock);
while(hypre_qptr->n_queue !=0 || hypre_qptr->n_working != 0)
pthread_cond_wait(&hypre_qptr->finish_wait, &hypre_qptr->lock);
pthread_mutex_unlock(&hypre_qptr->lock);
}
/*
 * Fetch-and-add: return the current value of *w, then increment it.
 * Not atomic -- callers must provide their own synchronization (see
 * ifetchadd() for the mutex-protected variant).
 */
int
hypre_fetch_and_add( int *w )
{
   int const old_value = *w;
   *w = old_value + 1;
   return old_value;
}
/*
 * Mutex-protected fetch-and-add: atomically (with respect to the given
 * mutex) return the current value of *w and increment it by one.
 */
int
ifetchadd( int *w, pthread_mutex_t *mutex_fetchadd )
{
   pthread_mutex_lock(mutex_fetchadd);
   int const old_value = *w;
   *w = old_value + 1;
   pthread_mutex_unlock(mutex_fetchadd);
   return old_value;
}
/* Spin-barrier state: number of threads currently inside the barrier, and
 * the release flag the non-last threads spin on. */
static volatile int thb_count = 0;
static volatile int thb_release = 0;

/*
 * Spin-wait barrier over hypre_NumThreads threads, serialized by `mtx`.
 * Non-last arrivals spin on thb_release; the last arrival raises it, waits
 * for the others to drain (thb_count reaching 0), then clears it for reuse.
 * When `unthreaded` is nonzero the call is a no-op.
 */
void hypre_barrier(pthread_mutex_t *mtx, int unthreaded)
{
   if (!unthreaded) {
      pthread_mutex_lock(mtx);
      thb_count++;
      if (thb_count < hypre_NumThreads) {
         /* not last: wait for release, decrement, wait for flag reset */
         pthread_mutex_unlock(mtx);
         while (!thb_release);
         pthread_mutex_lock(mtx);
         thb_count--;
         pthread_mutex_unlock(mtx);
         while (thb_release);
      }
      else if (thb_count == hypre_NumThreads) {
         /* last arrival: open the gate and wait for everyone to leave */
         thb_count--;
         pthread_mutex_unlock(mtx);
         thb_release++;
         while (thb_count);
         thb_release = 0;
      }
   }
}
/*
 * Identify the calling thread: returns hypre_NumThreads for the initial
 * (main) thread, the worker index 0..hypre_NumThreads-1 for a pool thread,
 * or -1 for a thread created outside the pool.
 */
int
hypre_GetThreadID( void )
{
   pthread_t const self = pthread_self();
   int t;

   if (pthread_equal(self, initial_thread))
      return hypre_NumThreads;

   for (t = 0; t < hypre_NumThreads; t++)
   {
      if (pthread_equal(self, hypre_thread[t]))
         return t;
   }

   return -1;
}
#endif
/*!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!*/
|
10.norace2.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 100
/* LLOV race-checker fixture: two independent array fills run as separate
 * sections (the first loop is the implicit first section), followed by a
 * worksharing loop combining them.  The analyzer should report both regions
 * race-free -- see the CHECK lines below this function. */
int main() {
  int A[N], B[N];
#pragma omp parallel shared(A) num_threads(2)
  {
#pragma omp sections // nowait
    {
      /* implicit first section: fill A */
      for (int i = 0; i < N; i++) {
        A[i] = i;
      }
#pragma omp section
      /* second section: fill B */
      for (int i = 0; i < N; i++) {
        B[i] = i * i;
      }
    }
    /* implicit barrier after sections orders the fills before this loop */
#pragma omp for
    for (int i = 0; i < N; i++) {
      A[i] *= B[i];
    }
  }
  return 0;
}
// Printing in reverse. Need to fix it.
// CHECK: Region is Data Race Free.
// CHECK: Region is Data Race Free.
// END
|
softmax-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file softmax-inl.h
* \brief
*/
#ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#define MXNET_OPERATOR_NN_SOFTMAX_INL_H_
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <type_traits>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "../tensor/broadcast_reduce_op.h"
#include "../../common/cuda_utils.h"
namespace mxnet {
namespace op {
namespace mxnet_op {
/// Elementwise softmax forward transform: Map(a, b) = exp(a) / b, where `b`
/// is the precomputed normalizer for the row.  Overloads pick expf/exp by
/// the precision of the input value.
struct softmax_fwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(double value, AType norm) {
    return AType(exp(value) / norm);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(float value, AType norm) {
    return AType(expf(value) / norm);
  }
};
/// Elementwise log-softmax forward transform: Map(a, b) = a - log(b), where
/// `b` is the precomputed normalizer.  The logf/log overload is selected by
/// the normalizer's precision.
struct log_softmax_fwd {
  template<typename DType>
  MSHADOW_XINLINE static double Map(DType value, double norm) {
    return value - log(norm);
  }

  template<typename DType>
  MSHADOW_XINLINE static float Map(DType value, float norm) {
    return value - logf(norm);
  }
};
/*!
 * CPU softmax-style forward kernel along `axis`.  OP supplies the per-element
 * transform (softmax_fwd or log_softmax_fwd); when `negate` is set each
 * input is read as its negation.  Each of the N rows of length M (stride sa
 * apart) is processed independently: max-subtraction for numeric stability,
 * a sum of exponentials, then OP applied per element.  `length`, when
 * non-null, gives a valid prefix length per row; positions past it are
 * zeroed.  `temperature` divides the shifted inputs; the ==1.0 branch skips
 * the division entirely.
 */
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<cpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const DType temperature) {
  index_t M = shape[axis];
  if (M == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];

  if (length == nullptr) {
    /* full-length rows */
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);

      /* row maximum, for numerically stable exponentials */
      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < M; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }

        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }

        for (index_t j = 0; j < M; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  } else {
    /* variable-length rows: only the first length[i] elements participate */
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t len = static_cast<index_t>(length[i]);
      index_t base = unravel_dot(i, sshape, stride);

      DType mmax = negate ? -in[base] : in[base];
      DType val;
      for (index_t j = 1; j < len; ++j) {
        val = negate ? -in[base + j*sa] : in[base + j*sa];
        if (mmax < val) mmax = val;
      }
      /* positions past the valid prefix are forced to zero */
      for (index_t j = len; j < M; ++j) {
        out[base + j*sa] = OType(0.0f);
      }

      AType sum = AType(0);
      DType in_val;
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      if (temperature == 1.0) {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp(in_val - mmax);
        }

        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map(in_val - mmax, sum);
        }
      } else {
        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          sum += std::exp((in_val - mmax)/temperature);
        }

        for (index_t j = 0; j < len; ++j) {
          in_val = negate ? -in[base + j*sa] : in[base + j*sa];
          out[base + j*sa] = OP::Map((in_val - mmax)/temperature, sum);
        }
      }
    }
  }
}
// Element transform for the softmax backward pass:
// igrad = out * (ograd - sum), where `sum` = sum_j(ograd_j * out_j).
struct softmax_bwd {
  template<typename DType, typename AType>
  MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) {
    const auto centered = ograd - sum;  // auto keeps the promoted type exact
    return AType(out * centered);
  }
};
// Element transform for the log-softmax backward pass:
// igrad = ograd - exp(out) * sum, where `sum` = sum_j(ograd_j).
struct log_softmax_bwd {
  template<typename AType>
  MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) {
    const auto scaled = expf(out) * sum;  // auto keeps the promoted type exact
    return AType(ograd - scaled);
  }

  template<typename AType>
  MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) {
    const auto scaled = exp(out) * sum;
    return AType(ograd - scaled);
  }
};
// Backward (log-)softmax on the CPU.  For every row: first reduce with OP1
// (softmax: ograd*out; log-softmax: ograd) into `sum`, then map each element
// through OP2 and optionally divide by `temperature`.  `Req` is the write
// request (KERNEL_ASSIGN handles write vs add-to).  With a non-null `length`
// mask, only the first length[i] elements contribute to `sum` and the
// masked-out tail receives a zero gradient.
template<typename OP1, typename OP2, int Req, bool negate,
         typename AType, typename DType, typename OType, typename IType, int ndim>
inline void SoftmaxGrad(Stream<cpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape,
                        int axis, const DType temperature) {
  index_t M = shape[axis];        // reduction length along `axis`
  if (M == 0) return;
  index_t N = shape.Size()/M;     // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  index_t sa = stride[axis];      // element distance along `axis`
  if (length != nullptr) {
    // Masked case.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      index_t len = static_cast<index_t>(length[i]);
      AType sum = AType(0);
      for (index_t j = 0; j < len; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          // Zero gradient for the masked-out tail.
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          final_result = (j < len) ? final_result : DType(0.0f);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  } else {
    // Unmasked case: the whole row contributes.
    #pragma omp parallel for
    for (index_t i = 0; i < N; ++i) {
      index_t base = unravel_dot(i, sshape, stride);
      AType sum = AType(0);
      for (index_t j = 0; j < M; ++j) {
        sum += OP1::Map(ograd[base + j*sa], out[base + j*sa]);
      }
      // By default temperature is 1.0.
      // Adding a branch here to save the CPU 'divide-by-1' computation at runtime
      DType final_result;
      if (temperature == 1.0) {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum);
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      } else {
        for (index_t j = 0; j < M; ++j) {
          final_result = negate ?
                         -OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature :
                         OP2::Map(ograd[base + j*sa], out[base + j*sa], sum) / temperature;
          KERNEL_ASSIGN(igrad[base + j*sa], Req, final_result);
        }
      }
    }
  }
}
#ifdef __CUDACC__
// Generic GPU forward kernel: one CUDA block per row.  Two block-wide
// reductions in shared memory compute the row maximum and the sum of
// exponentials; then each thread writes its strided slice of the output.
// Handles any stride along `axis` (the stride-1 case has a faster kernel).
template<int x_bits, typename OP, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_compute_kernel(DType *in, OType *out, IType *length,
                                       index_t M, int axis, Shape<ndim> sshape,
                                       Shape<ndim> stride, const double temperature) {
  const unsigned x_size = 1 << x_bits;  // threads per block
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  // Without a length mask the whole row (M elements) participates.
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[blockIdx.x]);
  // Pass 1: row maximum (subtracted later for numerical stability).
  red::maximum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] = ::max(smem[x], negate ? -in[base + i*sa] : in[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::maximum, x_bits>(smem);
  __syncthreads();
  DType smax = smem[0];
  __syncthreads();
  // Pass 2: sum of exp((x - max) / temperature), reusing the same shared buffer.
  red::sum::SetInitValue(smem[x]);
  DType val;
  for (index_t i = x; i < len; i += x_size) {
    val = negate ? -in[base + i*sa]:in[base + i*sa];
    smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Pass 3: write outputs; masked-out positions (i >= len) become 0.
  for (index_t i = x; i < M; i += x_size) {
    val = negate ? -in[base + i*sa] : in[base + i*sa];
    out[base + i*sa] =
      (i < len) ? OType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) : OType(0.0f);
  }
}
// Threads per CUDA block used by the stride-1 (contiguous axis) kernels below.
const int softmax_threads_per_block = 512;
// Optimized forward kernel for stride[axis] == 1.  Each block handles
// `rows_per_block` rows; every row is staged into a 20 kB shared-memory
// buffer with wide LType loads, then the max and exp-sum are computed with a
// shared-memory tree reduction finished by a warp-level reduction, and the
// transformed row is written back with wide stores.  Requires DType == OType
// (checked in the launcher) since the output is written through `row`.
template<typename OP, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_compute_kernel(const DType *in, OType *out, IType *length,
                                               const index_t M, const double temperature,
                                               const int rows_per_block, const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  // Without a length mask the whole row participates.
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* in_aligned = reinterpret_cast<const LType*>(in);
  size_t base = my_row * row_length;
  // Stage this row into shared memory using wide loads.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length + i] = in_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length);
  __syncthreads();
  // Pass 1: row maximum for numerical stability.
  DType my_max_value;
  red::maximum::SetInitValue(my_max_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]);
  }
  scratch[threadIdx.x] = my_max_value;
  __syncthreads();
  // Tree reduction down to one warp, then a warp-level reduction.
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]);
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return ::max(x, y); });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  // First scratch slot of this row's thread group holds the row max.
  DType smax = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Pass 2: sum of exp((x - max) / temperature), same reduction scheme.
  AType my_sum;
  red::sum::SetInitValue(my_sum);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature)));
  }
  scratch[threadIdx.x] = my_sum;
  __syncthreads();
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] += scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y;});
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Pass 3: transform in place; masked-out positions become 0.
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val = negate ? -row[i] : row[i];
    row[i] = (i < len) ? DType(OP::Map((val - smax)/static_cast<DType>(temperature), ssum)) :
                         DType(0.0f);
  }
  __syncthreads();
  // Write the finished row back with wide stores.
  LType* out_aligned = reinterpret_cast<LType*>(out);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    out_aligned[base + i] = persistent_storage[my_local_row * row_length + i];
  }
}
// GPU forward launcher.  Picks the shared-memory stride-1 kernel when the
// softmax axis is contiguous, the row fits in the 20 kB staging buffer, and
// DType == OType (the fast kernel writes output through the staged row);
// otherwise falls back to the generic strided kernel with one block per row.
template<typename OP, bool negate, typename AType, typename DType, typename OType,
         typename IType, int ndim>
inline void Softmax(Stream<gpu> *s, DType *in, OType *out, IType *length,
                    Shape<ndim> shape, int axis, const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;  // 128 threads for the generic kernel
  index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  index_t N = shape.Size()/M;      // number of independent rows
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  const size_t max_opt_M = 20 * 1024 / DSize;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    // get_load_type picks the widest vector load type for this row size.
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // The fast kernel assumes the load type is at least as wide as DType.
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_compute_kernel<OP, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          in, out, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_compute_kernel);
  } else {
    softmax_compute_kernel<x_bits, OP, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        in, out, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_compute_kernel);
  }
}
// Optimized backward kernel for stride[axis] == 1.  Each row stages BOTH the
// forward output and ograd into shared memory (out at offset 0, ograd at
// offset row_length LType entries — i.e. M DType entries — hence the
// `row[i + M]` accesses), reduces sum_j OP1(ograd_j, out_j), then writes the
// per-element gradient in place and stores it back with wide stores.
// NOTE(review): assumes M is divisible by entries_per_load — presumably
// guaranteed by get_load_type in the launcher; confirm.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType,
         typename DType, typename OType, typename IType>
__global__ void softmax_stride1_grad_kernel(const OType *out, const OType *ograd,
                                            DType *igrad, const IType *length,
                                            const index_t M,
                                            const double temperature,
                                            const int rows_per_block,
                                            const index_t total_rows) {
  __shared__ AType scratch[softmax_threads_per_block];
  __shared__ LType persistent_storage[20 * 1024 / sizeof(LType)];
  const int warp_size = 32;
  const int threads_per_row = softmax_threads_per_block / rows_per_block;
  const int my_local_row = threadIdx.x / threads_per_row;
  const int my_row = blockIdx.x * rows_per_block + my_local_row;
  if (my_row >= total_rows) return;
  const int my_id = threadIdx.x % threads_per_row;
  const int entries_per_load = sizeof(LType)/sizeof(DType);
  const index_t len = length == nullptr ? M : static_cast<index_t>(length[my_row]);
  // Due to usage of MSHADOW_TYPE_SWITCH macro we are generating
  // kernels where sizeof(LType) may be less than sizeof(DType),
  // resulting in entries_per_load being 0.
  // This is not a valid combination and is being checked against
  // in the launcher code. This switch here is just to silence
  // the division by zero warning generated for such invalid cases.
  const int row_length = entries_per_load > 0 ? M / entries_per_load : 0;
  const LType* out_aligned = reinterpret_cast<const LType*>(out);
  const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd);
  size_t base = my_row * row_length;
  // Stage out (first half) and ograd (second half) for this row.
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i];
    persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i];
  }
  DType * row = reinterpret_cast<DType *>(persistent_storage + my_local_row * row_length * 2);
  __syncthreads();
  // Reduce sum_j OP1(ograd_j, out_j) over the unmasked prefix.
  AType my_sum_value;
  red::sum::SetInitValue(my_sum_value);
  for (index_t i = my_id; i < len; i += threads_per_row) {
    my_sum_value += OP1::Map(row[i + M], row[i]);
  }
  scratch[threadIdx.x] = my_sum_value;
  __syncthreads();
  // Tree reduction down to one warp, then a warp-level reduction.
  for (int size = threads_per_row / 2; size >= warp_size; size /= 2) {
    if (my_id < size) {
      scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size];
    }
    __syncthreads();
  }
  if (my_id < warp_size) {
    AType my_value = warp_reduce(scratch[threadIdx.x],
                                 [](AType x, AType y) { return x + y; });
    scratch[threadIdx.x] = my_value;
  }
  __syncthreads();
  AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row];
  __syncthreads();
  // Compute the gradient in place; masked-out positions contribute 0 (plus
  // the previous igrad value when Req == kAddTo).
  for (index_t i = my_id; i < M; i += threads_per_row) {
    const DType val =
      negate ?
      -OP2::Map(row[i + M], row[i], ssum) :
      OP2::Map(row[i + M], row[i], ssum);
    row[i] = (i < len) ? DType(val / static_cast<DType>(temperature)) :
                         DType(0.0f);
    if (Req == kAddTo) {
      row[i] += igrad[my_row * M + i];
    }
  }
  __syncthreads();
  // Write the finished gradient row back with wide stores.
  LType* igrad_aligned = reinterpret_cast<LType*>(igrad);
  for (index_t i = my_id; i < row_length; i += threads_per_row) {
    igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i];
  }
}
// Generic GPU backward kernel: one block per row.  Reduces
// sum_j OP1(ograd_j, out_j) in shared memory, then each thread writes its
// strided slice of the gradient (zero for masked-out positions), divided by
// `temperature` and honoring the write request Req via KERNEL_ASSIGN.
template<int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
__global__ void softmax_grad_kernel(OType *out, OType *ograd, DType *igrad,
                                    const IType *length, index_t M, int axis,
                                    Shape<ndim> sshape, Shape<ndim> stride,
                                    const double temperature) {
  const unsigned x_size = 1 << x_bits;  // threads per block
  __shared__ AType smem[x_size];
  index_t sa = stride[axis];
  index_t base = unravel_dot(blockIdx.x, sshape, stride);
  index_t x = threadIdx.x;
  index_t len = length != nullptr ? static_cast<index_t>(length[blockIdx.x]) : M;
  // Block-wide reduction of sum_j OP1(ograd_j, out_j).
  red::sum::SetInitValue(smem[x]);
  for (index_t i = x; i < len; i += x_size) {
    smem[x] += OP1::Map(ograd[base + i*sa], out[base + i*sa]);
  }
  __syncthreads();
  cuda::Reduce1D<red::sum, x_bits>(smem);
  __syncthreads();
  AType ssum = smem[0];
  __syncthreads();
  // Per-element gradient; masked-out tail is forced to zero before assign.
  DType final_result;
  for (index_t i = x; i < M; i += x_size) {
    final_result =
      negate ?
      -OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum) :
      OP2::Map(ograd[base + i*sa], out[base + i*sa], ssum);
    final_result = (i < len) ? final_result : DType(0.0f);
    KERNEL_ASSIGN(igrad[base + i*sa], Req, final_result / static_cast<DType>(temperature));
  }
}
// GPU backward launcher.  Mirrors the forward launcher, but the fast path
// must hold BOTH out and ograd in the 20 kB staging buffer, so the maximum
// optimizable row length is halved.
template<typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim,
         typename DType, typename OType, typename IType>
inline void SoftmaxGrad(Stream<gpu> *s, OType *out, OType *ograd,
                        DType *igrad, IType *length, Shape<ndim> shape, int axis,
                        const double temperature) {
  const int x_bits = 7;
  const int x_size = 1 << x_bits;  // 128 threads for the generic kernel
  index_t M = shape[axis];
  if (M == 0 || shape.Size() == 0) return;
  index_t N = shape.Size()/M;
  Shape<ndim> stride = calc_stride(shape);
  Shape<ndim> sshape = shape;
  sshape[axis] = 1;
  const size_t DSize = sizeof(DType);
  // Using 20 kB of shared memory for persistent storage in the optimized case
  // Need to store both out and ograd, so M can be only half compared to
  // forward pass.
  const size_t max_opt_M = 20 * 1024 / DSize / 2;
  if (stride[axis] == 1 &&
      static_cast<size_t>(M) <= max_opt_M &&
      std::is_same<DType, OType>::value) {
    int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType));
    MXNET_LOAD_TYPE_SWITCH(ltype, LType, {
      int rows_per_block = mxnet::common::cuda::get_rows_per_block(M *
                             sizeof(DType) / sizeof(LType),
                             softmax_threads_per_block);
      int nblocks = (N + rows_per_block - 1) / rows_per_block;
      // The fast kernel assumes the load type is at least as wide as DType.
      CHECK_LE(sizeof(DType), sizeof(LType));
      softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType>
        <<<nblocks, softmax_threads_per_block, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
          out, ograd, igrad, length, M, temperature, rows_per_block, N);
    });
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_stride1_grad_kernel);
  } else {
    softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim>
      <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        out, ograd, igrad, length, M, axis, sshape, stride, temperature);
    MSHADOW_CUDA_POST_KERNEL_CHECK(softmax_grad_kernel);
  }
}
#endif
} // namespace mxnet_op
// User-facing parameters of the softmax family of operators.
struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> {
  int axis;                            // axis to normalize over (default -1: last axis)
  dmlc::optional<double> temperature;  // optional divisor applied to the logits
  dmlc::optional<int> dtype;           // optional output dtype override (None = same as input)
  dmlc::optional<bool> use_length;     // treat the second input as a per-row length mask
  DMLC_DECLARE_PARAMETER(SoftmaxParam) {
    DMLC_DECLARE_FIELD(axis).set_default(-1)
    .describe("The axis along which to compute softmax.");
    DMLC_DECLARE_FIELD(temperature).set_default(dmlc::optional<double>())
    .describe("Temperature parameter in softmax");
    DMLC_DECLARE_FIELD(dtype)
    .add_enum("float16", mshadow::kFloat16)
    .add_enum("float32", mshadow::kFloat32)
    .add_enum("float64", mshadow::kFloat64)
    .set_default(dmlc::optional<int>())
    .describe("DType of the output in case this can't be inferred. "
              "Defaults to the same as input's dtype if not defined (dtype=None).");
    DMLC_DECLARE_FIELD(use_length)
    .set_default(dmlc::optional<bool>(false))
    .describe("Whether to use the length input as a mask over the data input.");
  }

  // Field-wise equality; paired with the std::hash specialization below so
  // the param can key hash-based caches.
  bool operator==(const SoftmaxParam& other) const {
    return this->axis == other.axis &&
           this->temperature == other.temperature &&
           this->dtype == other.dtype &&
           this->use_length == other.use_length;
  }
};
// True when the user explicitly requested an output dtype (dtype is set and
// is a valid type code, i.e. not -1).
static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) {
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  if (!param.dtype.has_value()) {
    return false;
  }
  return param.dtype.value() != -1;
}
// True when the operator consumes a length input that masks each row.
static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) {
  const auto& parsed_param = nnvm::get<SoftmaxParam>(attrs.parsed);
  const bool masked = parsed_param.use_length.value();
  return masked;
}
// Type inference for the forward op.  With an explicit dtype override the
// output takes that dtype and the data input is propagated separately;
// otherwise the output dtype mirrors the data input via ElemwiseType.
static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int>* in_attrs,
                                 std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), 1);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value());
    // Let the data input keep its own dtype if already known.
    type_assign(&(*in_attrs)[0], (*out_attrs)[0]);
    return true;
  } else {
    // Only the data input participates; the optional length input keeps its dtype.
    std::vector<int> tmp = {in_attrs->at(0)};
    return ElemwiseType<1, 1>(attrs, &tmp, out_attrs);
  }
}
// Shape inference for the forward op.  With use_length, the length input's
// shape must equal the data shape with the softmax axis removed.
static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs,
                                  mxnet::ShapeVector *in_attrs,
                                  mxnet::ShapeVector *out_attrs) {
  CHECK_EQ(out_attrs->size(), 1U);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U);
  if (param.use_length.value()) {
    mxnet::TShape& dshape = in_attrs->at(0);
    // Length shape: data shape with `axis` dropped (scalar-like for 1-D data).
    mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1);
    int j = 0;
    // NOTE(review): only axis == -1 is normalized here; other negative axis
    // values are used as-is -- presumably rejected/normalized elsewhere
    // (CheckAxis in the compute path); confirm.
    int axis = param.axis != -1 ? param.axis : dshape.ndim() - 1;
    for (int i = 0; i < dshape.ndim(); ++i) {
      if (i != axis) {
        tmp_shape[j++] = dshape[i];
      }
    }
    SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape);
  }
  // Output shape equals the data input's shape.
  mxnet::ShapeVector tmp = {in_attrs->at(0)};
  return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs);
}
// Shape inference for the gradient op.  Input layout depends on the mode:
// {ograd, output} by default, {ograd, data, output} with a dtype override,
// and {ograd, data, length, output} with use_length (length at index 2,
// with its own gradient output at index 1).
static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs,
                                      mxnet::ShapeVector *in_attrs,
                                      mxnet::ShapeVector *out_attrs) {
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    if (softmax_use_length(attrs)) {
      // ograd, data and output must all share one shape; length is inferred
      // separately against its own gradient.
      mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)};
      mxnet::ShapeVector dgrad = {out_attrs->at(0)};
      bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad);
      SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]);
      SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]);
      mxnet::ShapeVector length = {in_attrs->at(2)};
      mxnet::ShapeVector lgrad = {out_attrs->at(1)};
      res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad));
      SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]);
      SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]);
      return res;
    } else {
      return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs);
    }
  } else {
    return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs);
  }
}
// Type inference for the gradient op.  Inputs are {ograd, output} in the
// simple case, {ograd, data, output} with a dtype override, and
// {ograd, data, length, output} with use_length; outputs are {dgrad} plus
// {lgrad} when use_length is set.
//
// Fix: the final check previously read (*out_attrs)[1] unconditionally, but
// without use_length the op has a single output (asserted below), so that
// index was past the end of the vector (undefined behavior).  The [1]
// accesses are now guarded by softmax_use_length.
static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs,
                                     std::vector<int>* in_attrs,
                                     std::vector<int>* out_attrs) {
  CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U);
  if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) {
    CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U);
    int in_dtype = (*in_attrs)[1];  // dtype of the forward data input
    // The forward output is always the last input of the gradient op.
    int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2];
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);   // ograd matches forward output
    TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype);   // dgrad matches forward data
    if (softmax_use_length(attrs)) {
      TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2));  // lgrad matches length
    }
    bool known = (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 &&
                 (*in_attrs)[1] != -1;
    if (softmax_use_length(attrs)) {
      // Index 1 only exists when the op has the length-gradient output.
      known = known && (*out_attrs)[1] != -1;
    }
    return known;
  } else {
    CHECK_EQ(in_attrs->size(), 2U);
    int out_dtype = (*in_attrs)[1];
    TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype);
    TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype);
    return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1;
  }
}
// In-place reuse options for the gradient op: pairs of {input index,
// output index} whose buffers the executor may share.
static inline std::vector<std::pair<int, int> >
SoftmaxGradOpInplaceOption(const nnvm::NodeAttrs& attrs) {
  const bool extended =
      softmax_has_dtype_override(attrs) || softmax_use_length(attrs);
  if (!extended) {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};
  }
  if (softmax_use_length(attrs)) {
    return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 1}, {3, 0}};
  }
  return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}, {2, 0}};
}
// Number of inputs the gradient op consumes; mirrors SoftmaxGradOpInputNames.
static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) {
  const bool use_len = softmax_use_length(attrs);
  if (!softmax_has_dtype_override(attrs) && !use_len) {
    return 2;  // {ograd, output}
  }
  return use_len ? 4 : 3;  // {ograd, data, [length,] output}
}
// Names of the gradient op's inputs, matching SoftmaxGradOpNumInputs.
static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) {
  const bool use_len = softmax_use_length(attrs);
  if (!softmax_has_dtype_override(attrs) && !use_len) {
    return std::vector<std::string>{"ograd", "output"};
  }
  if (use_len) {
    return std::vector<std::string>{"ograd", "data", "length", "output"};
  }
  return std::vector<std::string>{"ograd", "data", "output"};
}
// Gradient-node builder.  When the backward pass also needs the forward
// input (dtype override or length masking) it uses ElemwiseGradUseInOut;
// otherwise the cheaper ElemwiseGradUseOut (forward output only) suffices.
struct SoftmaxFGradient {
  const char *op_name;  // registered name of the backward operator
  std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n,
                                          const std::vector<nnvm::NodeEntry>& ograds) const {
    if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) {
      return ElemwiseGradUseInOut {op_name}(n, ograds);
    } else {
      return ElemwiseGradUseOut {op_name}(n, ograds);
    }
  }
};
// Forward compute entry point.  Dispatches on input/output dtype, optional
// length-mask dtype and accumulation policy (MXNET_SAFE_ACCUMULATION), then
// calls the 2-D or 3-D Softmax kernel produced by AxisShapeCompact.
// kAddTo is not supported for the forward pass (checked below).
template<typename xpu, typename OP, bool negate = false>
void SoftmaxCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (req[0] == kNullOp || inputs[0].Size() == 0U) return;
  CHECK_NE(req[0], kAddTo);
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse the shape around `axis` into at most 3 dims.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) {
    common::LogOnce("MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. "
                    "See https://mxnet.apache.org/api/faq/env_var "
                    "for more details.");
  }
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, OType, {
      // Default mask index type when no length input is present.
      int type = kInt32;
      if (param.use_length.value()) {
        CHECK(inputs.size() > 1)
          << "Mask needs to be provided when using softmax with use_length=True.";
        type = inputs[1].type_flag_;
      }
      MXNET_INT32_INT64_TYPE_SWITCH(type, IType, {
        IType* mask_ptr = nullptr;
        if (param.use_length.value()) {
          mask_ptr = inputs[1].dptr<IType>();
        }
        if (safe_acc) {
          // Accumulate in the wider AType.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, AType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        } else {
          // Accumulate in the input type itself.
          if (shape.ndim() == 2) {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(),
              axis, static_cast<DType>(temperature));
          } else {
            Softmax<OP, negate, DType>(
              ctx.get_stream<xpu>(), inputs[0].dptr<DType>(),
              outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(),
              axis, static_cast<DType>(temperature));
          }
        }
      });
    });
  });
}
// Backward compute entry point.  When a length mask is used, the length
// gradient output is always zero-filled first.  The data gradient is then
// dispatched on dtypes, write request and accumulation policy, calling the
// 2-D or 3-D SoftmaxGrad kernel.  Input index of the forward output depends
// on the mode (see SoftmaxGradOpInputNames): 1 by default, 2 with a dtype
// override, 3 with use_length.
template<typename xpu, typename OP1, typename OP2, bool negate = false>
void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  using namespace mxnet_op;
  if (softmax_use_length(attrs)) {
    // The length input is integral; its gradient is identically zero.
    MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, {
      if (req[1] != kNullOp) {
        mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch(
          ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>());
      }
    });
  }
  if (req[0] == kNullOp) return;
  const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32;
  const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed);
  int axis = CheckAxis(param.axis, inputs[0].ndim());
  const double temperature = param.temperature.has_value() ?
                             param.temperature.value() : 1.0;
  // Collapse the shape around `axis` into at most 3 dims.
  mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true);
  int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1;
  out_idx = softmax_use_length(attrs) ? 3 : out_idx;
  bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", false);
  MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, {
    MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, {
          IType * length_ptr = nullptr;
          if (softmax_use_length(attrs)) {
            length_ptr = inputs[2].dptr<IType>();
          }
          if (safe_acc) {
            // Accumulate in the wider AType.
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, AType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          } else {
            // Accumulate in the output (data) type itself.
            if (shape.ndim() == 2) {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<2>(), axis,
                static_cast<DType>(temperature));
            } else {
              SoftmaxGrad<OP1, OP2, Req, negate, DType>(
                ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(),
                inputs[0].dptr<OType>(), outputs[0].dptr<DType>(),
                length_ptr, shape.get<3>(), axis,
                static_cast<DType>(temperature));
            }
          }
        });
      });
    });
  });
}
} // namespace op
} // namespace mxnet
namespace std {
/*!
 * \brief Hash functor specialization so SoftmaxParam can key hash-based
 *        containers (e.g. std::unordered_map), combining all four fields.
 *
 * Fix: operator() is now const-qualified.  The standard Hash requirements
 * ([hash.requirements]) demand that the functor be callable through a const
 * object; the original non-const overload failed to compile in such contexts.
 */
template<>
struct hash<mxnet::op::SoftmaxParam> {
  size_t operator()(const mxnet::op::SoftmaxParam& val) const {
    size_t ret = 0;
    ret = dmlc::HashCombine(ret, val.axis);
    ret = dmlc::HashCombine(ret, val.temperature);
    ret = dmlc::HashCombine(ret, val.dtype);
    ret = dmlc::HashCombine(ret, val.use_length);
    return ret;
  }
};
}  // namespace std
#endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
|
callback.h | #ifndef _BSD_SOURCE
#define _BSD_SOURCE
#endif
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE
#endif
#include <stdio.h>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <omp.h>
#include <omp-tools.h>
#include "ompt-signal.h"
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifndef _TOOL_PREFIX
#define _TOOL_PREFIX ""
// If no _TOOL_PREFIX is set, we assume that we run as part of an OMPT test
#define _OMPT_TESTS
#endif
// String names indexed by ompt_thread_t values, for callback trace output.
static const char *ompt_thread_t_values[] = {
    "ompt_thread_UNDEFINED", "ompt_thread_initial", "ompt_thread_worker",
    "ompt_thread_other"};

// String names indexed by ompt_task_status_t values.
static const char *ompt_task_status_t_values[] = {
    "ompt_task_UNDEFINED",
    "ompt_task_complete", // 1
    "ompt_task_yield", // 2
    "ompt_task_cancel", // 3
    "ompt_task_detach", // 4
    "ompt_task_early_fulfill", // 5
    "ompt_task_late_fulfill", // 6
    "ompt_task_switch", // 7
    "ompt_taskwait_complete" // 8
};

// String names indexed by ompt_cancel_flag_t bit positions.
static const char* ompt_cancel_flag_t_values[] = {
    "ompt_cancel_parallel",
    "ompt_cancel_sections",
    "ompt_cancel_loop",
    "ompt_cancel_taskgroup",
    "ompt_cancel_activated",
    "ompt_cancel_detected",
    "ompt_cancel_discarded_task"
};

// String names indexed by ompt_dependence_type_t values.
static const char *ompt_dependence_type_t_values[] = {
    "ompt_dependence_type_UNDEFINED",
    "ompt_dependence_type_in", // 1
    "ompt_dependence_type_out", // 2
    "ompt_dependence_type_inout", // 3
    "ompt_dependence_type_mutexinoutset", // 4
    "ompt_dependence_type_source", // 5
    "ompt_dependence_type_sink", // 6
    "ompt_dependence_type_inoutset" // 7
};
// Render the ompt task-type bit set into `buffer` as a human-readable string.
// The base kinds (initial/implicit/explicit/target/taskwait) are written
// without a separator; each modifier flag is appended with a leading '|',
// e.g. "ompt_task_explicit|ompt_task_untied".
//
// Fix: the buffer is now always NUL-terminated.  Previously, when no flag
// bit matched, the caller's buffer (an uninitialized stack array in
// print_ids) was left untouched and subsequently printed.
static void format_task_type(int type, char *buffer) {
  static const struct {
    int flag;
    const char *name;
  } flag_names[] = {
      {ompt_task_initial, "ompt_task_initial"},
      {ompt_task_implicit, "ompt_task_implicit"},
      {ompt_task_explicit, "ompt_task_explicit"},
      {ompt_task_target, "ompt_task_target"},
      {ompt_task_taskwait, "ompt_task_taskwait"},
      {ompt_task_undeferred, "|ompt_task_undeferred"},
      {ompt_task_untied, "|ompt_task_untied"},
      {ompt_task_final, "|ompt_task_final"},
      {ompt_task_mergeable, "|ompt_task_mergeable"},
      {ompt_task_merged, "|ompt_task_merged"}};
  char *progress = buffer;
  size_t i;
  *progress = '\0'; // defined contents even when no flag bit is set
  for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); ++i) {
    if (type & flag_names[i].flag)
      progress += sprintf(progress, "%s", flag_names[i].name);
  }
}
// OMPT runtime entry points.  These function pointers are file-scope statics
// filled in once (via ompt_function_lookup at tool initialization) and used
// by the callbacks and helpers below.
static ompt_set_callback_t ompt_set_callback;
static ompt_get_callback_t ompt_get_callback;
static ompt_get_state_t ompt_get_state;
static ompt_get_task_info_t ompt_get_task_info;
static ompt_get_task_memory_t ompt_get_task_memory;
static ompt_get_thread_data_t ompt_get_thread_data;
static ompt_get_parallel_info_t ompt_get_parallel_info;
static ompt_get_unique_id_t ompt_get_unique_id;
static ompt_finalize_tool_t ompt_finalize_tool;
static ompt_get_num_procs_t ompt_get_num_procs;
static ompt_get_num_places_t ompt_get_num_places;
static ompt_get_place_proc_ids_t ompt_get_place_proc_ids;
static ompt_get_place_num_t ompt_get_place_num;
static ompt_get_partition_place_nums_t ompt_get_partition_place_nums;
static ompt_get_proc_id_t ompt_get_proc_id;
static ompt_enumerate_states_t ompt_enumerate_states;
static ompt_enumerate_mutex_impls_t ompt_enumerate_mutex_impls;
// Query ompt_get_task_info for ancestry `level` and print the parallel/task
// ids, frame pointers and decoded task type in the canonical format the
// lit tests match against.  Nothing is printed when no frame is reported.
static void print_ids(int level)
{
  int task_type, thread_num;
  ompt_frame_t *frame;
  ompt_data_t *task_parallel_data;
  ompt_data_t *task_data;
  // exists_task is 0 when no task exists at this level; ids then print as 0.
  int exists_task = ompt_get_task_info(level, &task_type, &task_data, &frame,
                                       &task_parallel_data, &thread_num);
  char buffer[2048];
  // NOTE(review): format_task_type only writes for flag bits it recognizes,
  // so `buffer` may hold uninitialized stack data if task_type is 0 --
  // presumably the runtime always reports a valid type here; confirm.
  format_task_type(task_type, buffer);
  if (frame)
    printf("%" PRIu64 ": task level %d: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", exit_frame=%p, reenter_frame=%p, "
           "task_type=%s=%d, thread_num=%d\n",
           ompt_get_thread_data()->value, level,
           exists_task ? task_parallel_data->value : 0,
           exists_task ? task_data->value : 0, frame->exit_frame.ptr,
           frame->enter_frame.ptr, buffer, task_type, thread_num);
}
// Wrappers around the GCC/Clang builtin for inspecting the machine call
// stack; tests compare these addresses against the frames OMPT reports.
#define get_frame_address(level) __builtin_frame_address(level)
#define print_frame(level) \
  printf("%" PRIu64 ": __builtin_frame_address(%d)=%p\n", \
         ompt_get_thread_data()->value, level, get_frame_address(level))
// clang (version 5.0 and above) adds an intermediate function call with debug flag (-g)
#if defined(TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN)
#if defined(DEBUG) && defined(__clang__) && __clang_major__ >= 5
// Skip the extra wrapper frame that clang inserts around outlined
// (parallel-region) functions when compiling with debug info.
#define print_frame_from_outlined_fn(level) print_frame(level+1)
#else
#define print_frame_from_outlined_fn(level) print_frame(level)
#endif
#if defined(__clang__) && __clang_major__ >= 5
#warning "Clang 5.0 and later add an additional wrapper for outlined functions when compiling with debug information."
#warning "Please define -DDEBUG iff you manually pass in -g to make the tests succeed!"
#endif
#endif
// This macro helps to define a label at the current position that can be used
// to get the current address in the code.
//
// For print_current_address():
// To reliably determine the offset between the address of the label and the
// actual return address, we insert a NOP instruction as a jump target as the
// compiler would otherwise insert an instruction that we can't control. The
// instruction length is target dependent and is explained below.
//
// (The empty block between "#pragma omp ..." and the __asm__ statement is a
// workaround for a bug in the Intel Compiler.)
#define define_ompt_label(id) \
  {} \
  __asm__("nop"); \
ompt_label_##id:
// This macro helps to get the address of a label that is inserted by the above
// macro define_ompt_label(). The address is obtained with a GNU extension
// (&&label) that has been tested with gcc, clang and icc.
#define get_ompt_label_address(id) (&& ompt_label_##id)
// This macro prints the exact address that a previously called runtime function
// returns to.
// NOTE: must be expanded in statement context, immediately after the runtime
// call whose return address is being probed.
#define print_current_address(id) \
  define_ompt_label(id) \
  print_possible_return_addresses(get_ompt_label_address(id))
// Per-architecture byte offsets from the inserted label back to the possible
// return addresses of the preceding runtime call (see define_ompt_label()).
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
// On X86 the NOP instruction is 1 byte long. In addition, the compiler inserts
// a MOV instruction for non-void runtime functions which is 3 bytes long.
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p for non-void functions\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 1, ((char *)addr) - 4)
#elif KMP_ARCH_PPC64
// On Power the NOP instruction is 4 bytes long. In addition, the compiler
// inserts a second NOP instruction (another 4 bytes). For non-void runtime
// functions Clang inserts a STW instruction (but only if compiling under
// -fno-PIC which will be the default with Clang 8.0, another 4 bytes).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 8, ((char *)addr) - 12)
#elif KMP_ARCH_AARCH64
// On AArch64 the NOP instruction is 4 bytes long, can be followed by inserted
// store instruction (another 4 bytes long).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", ompt_get_thread_data()->value, \
         ((char *)addr) - 4, ((char *)addr) - 8)
#elif KMP_ARCH_RISCV64
#if __riscv_compressed
// On RV64GC the C.NOP instruction is 2 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the C.NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 6, ((char *)addr) - 10)
#else
// On RV64G the NOP instruction is 4 byte long. In addition, the compiler
// inserts a J instruction (targeting the successor basic block), which
// accounts for another 4 bytes. Finally, an additional J instruction may
// appear (adding 4 more bytes) when the NOP is referenced elsewhere (ie.
// another branch).
#define print_possible_return_addresses(addr) \
  printf("%" PRIu64 ": current_address=%p or %p\n", \
         ompt_get_thread_data()->value, ((char *)addr) - 8, ((char *)addr) - 12)
#endif
#else
#error Unsupported target architecture, cannot determine address offset!
#endif
// This macro performs a somewhat similar job to print_current_address(), except
// that it discards a certain number of nibbles from the address and only prints
// the most significant bits / nibbles. This can be used for cases where the
// return address can only be approximated.
//
// To account for overflows (ie the most significant bits / nibbles have just
// changed as we are a few bytes above the relevant power of two) the addresses
// of the "current" and of the "previous block" are printed.
#define print_fuzzy_address(id) \
  define_ompt_label(id) \
  print_fuzzy_address_blocks(get_ompt_label_address(id))
// If you change this define you need to adapt all capture patterns in the tests
// to include or discard the new number of nibbles!
#define FUZZY_ADDRESS_DISCARD_NIBBLES 2
#define FUZZY_ADDRESS_DISCARD_BYTES (1 << ((FUZZY_ADDRESS_DISCARD_NIBBLES) * 4))
// Print the truncated block of addr plus its neighbors, so the test pattern
// can match regardless of which side of a block boundary addr landed on.
#define print_fuzzy_address_blocks(addr) \
  printf("%" PRIu64 ": fuzzy_address=0x%" PRIx64 " or 0x%" PRIx64 \
         " or 0x%" PRIx64 " or 0x%" PRIx64 " (%p)\n", \
         ompt_get_thread_data()->value, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES - 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 1, \
         ((uint64_t)addr) / FUZZY_ADDRESS_DISCARD_BYTES + 2, addr)
// Register the on_<name> handler for OMPT event <name>; warns (but does not
// abort) when the runtime refuses the registration.
#define register_ompt_callback_t(name, type) \
  do { \
    type f_##name = &on_##name; \
    if (ompt_set_callback(name, (ompt_callback_t)f_##name) == ompt_set_never) \
      printf("0: Could not register callback '" #name "'\n"); \
  } while (0)
#define register_ompt_callback(name) register_ompt_callback_t(name, name##_t)
#ifndef USE_PRIVATE_TOOL
// Trace the start of a wait on a lock / nest-lock / critical / atomic /
// ordered construct.  The emitted line is byte-identical to the per-kind
// printf it replaces: "ompt_event_wait_<kind>: ...".
static void
on_ompt_callback_mutex_acquire(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Map the mutex kind to the event-name suffix; unknown kinds are ignored,
  // matching the original switch that had no default action.
  const char *event = NULL;
  switch (kind)
  {
    case ompt_mutex_lock:
      event = "wait_lock";
      break;
    case ompt_mutex_nest_lock:
      event = "wait_nest_lock";
      break;
    case ompt_mutex_critical:
      event = "wait_critical";
      break;
    case ompt_mutex_atomic:
      event = "wait_atomic";
      break;
    case ompt_mutex_ordered:
      event = "wait_ordered";
      break;
    default:
      break;
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, hint, impl,
           codeptr_ra);
}
// Trace the successful acquisition of a mutex-like construct.  Output is
// byte-identical to the original per-kind printf calls.
static void
on_ompt_callback_mutex_acquired(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Kind -> event-name suffix; unknown kinds print nothing (as before).
  const char *event = NULL;
  switch (kind)
  {
    case ompt_mutex_lock:
      event = "acquired_lock";
      break;
    case ompt_mutex_nest_lock:
      event = "acquired_nest_lock_first";
      break;
    case ompt_mutex_critical:
      event = "acquired_critical";
      break;
    case ompt_mutex_atomic:
      event = "acquired_atomic";
      break;
    case ompt_mutex_ordered:
      event = "acquired_ordered";
      break;
    default:
      break;
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Trace the release of a mutex-like construct.  Output is byte-identical to
// the original per-kind printf calls.
static void
on_ompt_callback_mutex_released(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Kind -> event-name suffix; unknown kinds print nothing (as before).
  const char *event = NULL;
  switch (kind)
  {
    case ompt_mutex_lock:
      event = "release_lock";
      break;
    case ompt_mutex_nest_lock:
      event = "release_nest_lock_last";
      break;
    case ompt_mutex_critical:
      event = "release_critical";
      break;
    case ompt_mutex_atomic:
      event = "release_atomic";
      break;
    case ompt_mutex_ordered:
      event = "release_ordered";
      break;
    default:
      break;
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Trace re-acquisition (scope begin) / partial release (scope end) of a
// nested lock that the thread already owns.
static void
on_ompt_callback_nest_lock(
  ompt_scope_endpoint_t endpoint,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  const char *event = NULL;
  switch (endpoint)
  {
    case ompt_scope_begin:
      event = "acquired_nest_lock_next";
      break;
    case ompt_scope_end:
      event = "release_nest_lock_prev";
      break;
    case ompt_scope_beginend:
      // The combined endpoint is invalid for this event; abort the test.
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Begin/end callback for synchronization regions (barriers, taskwait,
// taskgroup).  Dispatches on the endpoint first, then on the region kind,
// and prints one trace line per event in the exact format the OMPT tests
// match against.  Reduction regions must arrive via the dedicated
// reduction callback instead.
static void
on_ompt_callback_sync_region(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        // All barrier flavors share a single trace line.
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          // Also dump the current task's ids/frames for frame-check tests.
          print_ids(0);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_implementation:
          // parallel_data may be NULL at region end (e.g. the final barrier
          // of a parallel region); report 0 in that case.
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Begin/end callback for the *wait* portion of synchronization regions:
// fires when a thread actually starts/stops waiting inside a barrier,
// taskwait, or taskgroup.  Structure mirrors on_ompt_callback_sync_region;
// the trace lines use the ompt_event_wait_* prefix instead.
static void
on_ompt_callback_sync_region_wait(
  ompt_sync_region_t kind,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(kind)
      {
        // All barrier flavors share a single trace line.
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_end:
      switch(kind)
      {
        case ompt_sync_region_barrier:
        case ompt_sync_region_barrier_implicit:
        case ompt_sync_region_barrier_implicit_workshare:
        case ompt_sync_region_barrier_implicit_parallel:
        case ompt_sync_region_barrier_teams:
        case ompt_sync_region_barrier_explicit:
        case ompt_sync_region_barrier_implementation:
          // parallel_data may be NULL at region end; report 0 in that case.
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_barrier_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskwait:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskwait_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_taskgroup:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_wait_taskgroup_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
                 ompt_get_thread_data()->value,
                 (parallel_data) ? parallel_data->value : 0, task_data->value,
                 codeptr_ra);
          break;
        case ompt_sync_region_reduction:
          printf("ompt_sync_region_reduction should never be passed to "
                 "on_ompt_callback_sync_region_wait\n");
          exit(-1);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Trace begin/end of a reduction region.  parallel_data may be NULL; the
// trace line then reports parallel_id=0.
static void on_ompt_callback_reduction(ompt_sync_region_t kind,
                                       ompt_scope_endpoint_t endpoint,
                                       ompt_data_t *parallel_data,
                                       ompt_data_t *task_data,
                                       const void *codeptr_ra) {
  const char *event = NULL;
  switch (endpoint) {
  case ompt_scope_begin:
    event = "reduction_begin";
    break;
  case ompt_scope_end:
    event = "reduction_end";
    break;
  case ompt_scope_beginend:
    // The combined endpoint is invalid for this event; abort the test.
    printf("ompt_scope_beginend should never be passed to %s\n", __func__);
    exit(-1);
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value, event,
           (parallel_data) ? parallel_data->value : 0, task_data->value,
           codeptr_ra);
}
// Trace an OpenMP flush, keyed by the flushing thread's id.
static void
on_ompt_callback_flush(
  ompt_data_t *thread_data,
  const void *codeptr_ra)
{
  uint64_t tid = thread_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_flush: codeptr_ra=%p\n",
         tid, codeptr_ra);
}
// Trace a cancellation event.  The flags word combines one "what is being
// cancelled" bit (parallel/sections/loop/taskgroup) with one "how" bit
// (activated/detected/discarded_task); both names are printed.
static void
on_ompt_callback_cancel(
  ompt_data_t *task_data,
  int flags,
  const void *codeptr_ra)
{
  // Initialize both names: the if/else chains below leave them unset when
  // flags carries none of the tested bits, and passing an uninitialized
  // pointer to printf("%s") is undefined behavior.
  const char* first_flag_value = "";
  const char* second_flag_value = "";
  if(flags & ompt_cancel_parallel)
    first_flag_value = ompt_cancel_flag_t_values[0];
  else if(flags & ompt_cancel_sections)
    first_flag_value = ompt_cancel_flag_t_values[1];
  else if(flags & ompt_cancel_loop)
    first_flag_value = ompt_cancel_flag_t_values[2];
  else if(flags & ompt_cancel_taskgroup)
    first_flag_value = ompt_cancel_flag_t_values[3];
  if(flags & ompt_cancel_activated)
    second_flag_value = ompt_cancel_flag_t_values[4];
  else if(flags & ompt_cancel_detected)
    second_flag_value = ompt_cancel_flag_t_values[5];
  else if(flags & ompt_cancel_discarded_task)
    second_flag_value = ompt_cancel_flag_t_values[6];
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_cancel: task_data=%" PRIu64
         ", flags=%s|%s=%" PRIu32 ", codeptr_ra=%p\n",
         ompt_get_thread_data()->value, task_data->value, first_flag_value,
         second_flag_value, flags, codeptr_ra);
}
// Begin/end callback for implicit tasks.  The initial task (flag
// ompt_task_initial) is reported with the ..._initial_task_... events and
// also assigns the id of the implicit parallel region, since no
// parallel_begin callback fires for it.  For the initial task the
// team_size/thread_num parameters carry actual_parallelism/index.
static void
on_ompt_callback_implicit_task(
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  unsigned int team_size,
  unsigned int thread_num,
  int flags)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      // A fresh implicit task must not arrive with tool data attached.
      if(task_data->ptr)
        printf("%s\n", "0: task_data initially not null");
      task_data->value = ompt_get_unique_id();
      //there is no parallel_begin callback for implicit parallel region
      //thus it is initialized in initial task
      if(flags & ompt_task_initial)
      {
        char buffer[2048];
        format_task_type(flags, buffer);
        // Only check initial task not created by teams construct
        if (team_size == 1 && thread_num == 1 && parallel_data->ptr)
          printf("%s\n", "0: parallel_data initially not null");
        parallel_data->value = ompt_get_unique_id();
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 ", flags=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num, flags);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_begin: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value, parallel_data->value,
               task_data->value, team_size, thread_num);
      }
      break;
    case ompt_scope_end:
      // parallel_data may already be gone at task end; report 0 then.
      if(flags & ompt_task_initial){
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_initial_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", actual_parallelism=%" PRIu32
               ", index=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      } else {
        printf("%" PRIu64 ":" _TOOL_PREFIX
               " ompt_event_implicit_task_end: parallel_id=%" PRIu64
               ", task_id=%" PRIu64 ", team_size=%" PRIu32
               ", thread_num=%" PRIu32 "\n",
               ompt_get_thread_data()->value,
               (parallel_data) ? parallel_data->value : 0, task_data->value,
               team_size, thread_num);
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Trace initialization of a lock or nested lock.  Output is byte-identical
// to the original per-kind printf calls.
static void
on_ompt_callback_lock_init(
  ompt_mutex_t kind,
  unsigned int hint,
  unsigned int impl,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Only plain and nested locks are reported; other kinds print nothing.
  const char *event = NULL;
  switch (kind)
  {
    case ompt_mutex_lock:
      event = "init_lock";
      break;
    case ompt_mutex_nest_lock:
      event = "init_nest_lock";
      break;
    default:
      break;
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", hint=%" PRIu32 ", impl=%" PRIu32 ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, hint, impl,
           codeptr_ra);
}
// Trace destruction of a lock or nested lock.  Output is byte-identical to
// the original per-kind printf calls.
static void
on_ompt_callback_lock_destroy(
  ompt_mutex_t kind,
  ompt_wait_id_t wait_id,
  const void *codeptr_ra)
{
  // Only plain and nested locks are reported; other kinds print nothing.
  const char *event = NULL;
  switch (kind)
  {
    case ompt_mutex_lock:
      event = "destroy_lock";
      break;
    case ompt_mutex_nest_lock:
      event = "destroy_nest_lock";
      break;
    default:
      break;
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: wait_id=%" PRIu64
           ", codeptr_ra=%p \n",
           ompt_get_thread_data()->value, event, wait_id, codeptr_ra);
}
// Begin/end callback for worksharing constructs (loop, sections, single,
// workshare, distribute, taskloop, scope).  One trace line per event.
// NOTE(review): the label of the second id intentionally varies by case
// ("parent_task_id" vs "task_id") — the test patterns match these exact
// strings, so do not normalize them.
static void
on_ompt_callback_work(
  ompt_work_t wstype,
  ompt_scope_endpoint_t endpoint,
  ompt_data_t *parallel_data,
  ompt_data_t *task_data,
  uint64_t count,
  const void *codeptr_ra)
{
  switch(endpoint)
  {
    case ompt_scope_begin:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_begin: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_begin: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_end:
      switch(wstype)
      {
        case ompt_work_loop:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_loop_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_sections:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_sections_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_executor:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_in_block_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_single_other:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_single_others_end: parallel_id=%" PRIu64
                 ", task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_workshare:
          //impl
          break;
        case ompt_work_distribute:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_distribute_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_taskloop:
          //impl
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_taskloop_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
        case ompt_work_scope:
          printf("%" PRIu64 ":" _TOOL_PREFIX
                 " ompt_event_scope_end: parallel_id=%" PRIu64
                 ", parent_task_id=%" PRIu64 ", codeptr_ra=%p, count=%" PRIu64
                 "\n",
                 ompt_get_thread_data()->value, parallel_data->value,
                 task_data->value, codeptr_ra, count);
          break;
      }
      break;
    case ompt_scope_beginend:
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
}
// Trace begin/end of a masked (formerly master) region.
static void on_ompt_callback_masked(ompt_scope_endpoint_t endpoint,
                                    ompt_data_t *parallel_data,
                                    ompt_data_t *task_data,
                                    const void *codeptr_ra) {
  const char *event = NULL;
  switch (endpoint)
  {
    case ompt_scope_begin:
      event = "masked_begin";
      break;
    case ompt_scope_end:
      event = "masked_end";
      break;
    case ompt_scope_beginend:
      // The combined endpoint is invalid for this event; abort the test.
      printf("ompt_scope_beginend should never be passed to %s\n", __func__);
      exit(-1);
  }
  if (event)
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s: parallel_id=%" PRIu64
           ", task_id=%" PRIu64 ", codeptr_ra=%p\n",
           ompt_get_thread_data()->value, event, parallel_data->value,
           task_data->value, codeptr_ra);
}
// Trace creation of a parallel or teams region; assigns the region's tool
// id and reports the encountering task's id and frames.
static void on_ompt_callback_parallel_begin(
    ompt_data_t *encountering_task_data,
    const ompt_frame_t *encountering_task_frame, ompt_data_t *parallel_data,
    uint32_t requested_team_size, int flag, const void *codeptr_ra) {
  // A fresh region must not arrive with tool data attached.
  if (parallel_data->ptr)
    printf("0: parallel_data initially not null\n");
  parallel_data->value = ompt_get_unique_id();
  // The low nibble of the flag encodes how the runtime invokes the region.
  int invoker = flag & 0xF;
  int is_team = (flag & ompt_parallel_team) != 0;
  const char *event = is_team ? "parallel" : "teams";
  const char *size = is_team ? "team_size" : "num_teams";
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_%s_begin: parent_task_id=%" PRIu64
         ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
         "parallel_id=%" PRIu64 ", requested_%s=%" PRIu32
         ", codeptr_ra=%p, invoker=%d\n",
         ompt_get_thread_data()->value, event, encountering_task_data->value,
         encountering_task_frame->exit_frame.ptr,
         encountering_task_frame->enter_frame.ptr, parallel_data->value, size,
         requested_team_size, codeptr_ra, invoker);
}
// Trace the end of a parallel or teams region.
static void on_ompt_callback_parallel_end(ompt_data_t *parallel_data,
                                          ompt_data_t *encountering_task_data,
                                          int flag, const void *codeptr_ra) {
  // Low nibble of the flag encodes how the runtime invoked the region.
  int invoker = flag & 0xF;
  const char *event =
      (flag & ompt_parallel_team) ? "parallel" : "teams";
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_%s_end: parallel_id=%" PRIu64
         ", task_id=%" PRIu64 ", invoker=%d, codeptr_ra=%p\n",
         ompt_get_thread_data()->value, event, parallel_data->value,
         encountering_task_data->value, invoker, codeptr_ra);
}
// Trace creation of an explicit task; assigns the new task's tool id and
// reports the creating task's id/frames (which may be absent).
static void
on_ompt_callback_task_create(
  ompt_data_t *encountering_task_data,
  const ompt_frame_t *encountering_task_frame,
  ompt_data_t* new_task_data,
  int type,
  int has_dependences,
  const void *codeptr_ra)
{
  // A fresh task must not arrive with tool data attached.
  if (new_task_data->ptr)
    printf("0: new_task_data initially not null\n");
  new_task_data->value = ompt_get_unique_id();
  // Render the task-type bitmask as a human-readable string.
  char buffer[2048];
  format_task_type(type, buffer);
  // The encountering task/frame pointers may be NULL; report 0/NULL then.
  uint64_t parent_id =
      encountering_task_data ? encountering_task_data->value : 0;
  void *exit_ptr =
      encountering_task_frame ? encountering_task_frame->exit_frame.ptr : NULL;
  void *enter_ptr =
      encountering_task_frame ? encountering_task_frame->enter_frame.ptr : NULL;
  printf(
      "%" PRIu64 ":" _TOOL_PREFIX
      " ompt_event_task_create: parent_task_id=%" PRIu64
      ", parent_task_frame.exit=%p, parent_task_frame.reenter=%p, "
      "new_task_id=%" PRIu64
      ", codeptr_ra=%p, task_type=%s=%d, has_dependences=%s\n",
      ompt_get_thread_data()->value, parent_id, exit_ptr, enter_ptr,
      new_task_data->value, codeptr_ra, buffer, type,
      has_dependences ? "yes" : "no");
}
// Called when the runtime switches between tasks.  Also synthesizes an
// ompt_event_task_end line when the prior task finished (completed, late
// fulfill, or taskwait completion), since OMPT has no dedicated task-end
// event.
static void
on_ompt_callback_task_schedule(
    ompt_data_t *first_task_data,
    ompt_task_status_t prior_task_status,
    ompt_data_t *second_task_data)
{
  // second_task_data may be NULL; the -1 then converts to UINT64_MAX in the
  // PRIu64 conversion below.
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_schedule: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 ", prior_task_status=%s=%d\n",
         ompt_get_thread_data()->value, first_task_data->value,
         (second_task_data ? second_task_data->value : -1),
         ompt_task_status_t_values[prior_task_status], prior_task_status);
  if (prior_task_status == ompt_task_complete ||
      prior_task_status == ompt_task_late_fulfill ||
      prior_task_status == ompt_taskwait_complete) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_task_end: task_id=%" PRIu64
           "\n", ompt_get_thread_data()->value, first_task_data->value);
  }
}
// Report the dependence list of a newly created task as a single trace
// line.  The textual list is assembled into a bounded buffer; the
// "progress < buffer + 2000" guard leaves headroom for one more formatted
// entry within the 2048-byte buffer.
static void
on_ompt_callback_dependences(
  ompt_data_t *task_data,
  const ompt_dependence_t *deps,
  int ndeps)
{
  char buffer[2048];
  char *progress = buffer;
  for (int i = 0; i < ndeps && progress < buffer + 2000; i++) {
    // source/sink (doacross) dependences carry a value, others a pointer.
    if (deps[i].dependence_type == ompt_dependence_type_source ||
        deps[i].dependence_type == ompt_dependence_type_sink)
      progress +=
          sprintf(progress, "(%" PRIu64 ", %s), ", deps[i].variable.value,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
    else
      progress +=
          sprintf(progress, "(%p, %s), ", deps[i].variable.ptr,
                  ompt_dependence_type_t_values[deps[i].dependence_type]);
  }
  // Strip the trailing ", " left by the last formatted entry.
  if (ndeps > 0)
    progress[-2] = 0;
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_dependences: task_id=%" PRIu64
         ", deps=[%s], ndeps=%d\n",
         ompt_get_thread_data()->value, task_data->value, buffer, ndeps);
}
// Report one edge of the task-dependence graph (predecessor -> successor).
static void
on_ompt_callback_task_dependence(
  ompt_data_t *first_task_data,
  ompt_data_t *second_task_data)
{
  uint64_t pred_id = first_task_data->value;
  uint64_t succ_id = second_task_data->value;
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_task_dependence_pair: first_task_id=%" PRIu64
         ", second_task_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, pred_id, succ_id);
}
// Trace the start of a runtime thread; assigns the thread's tool id.
static void
on_ompt_callback_thread_begin(
  ompt_thread_t thread_type,
  ompt_data_t *thread_data)
{
  // A fresh thread must not arrive with tool data attached.
  if(thread_data->ptr)
    printf("%s\n", "0: thread_data initially not null");
  thread_data->value = ompt_get_unique_id();
  printf("%" PRIu64 ":" _TOOL_PREFIX
         " ompt_event_thread_begin: thread_type=%s=%d, thread_id=%" PRIu64 "\n",
         ompt_get_thread_data()->value, ompt_thread_t_values[thread_type],
         thread_type, thread_data->value);
}
// Trace the end of a runtime thread.
static void
on_ompt_callback_thread_end(
  ompt_data_t *thread_data)
{
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_thread_end: thread_id=%" PRIu64
         "\n",
         ompt_get_thread_data()->value, thread_data->value);
}
// Handle omp_control_tool(): print the command plus the current task's
// frames, then (outside OMPT-specific tests) walk and print the full task
// and parallel-region ancestry.  Returns 0 for success.
static int
on_ompt_callback_control_tool(
  uint64_t command,
  uint64_t modifier,
  void *arg,
  const void *codeptr_ra)
{
  ompt_frame_t* omptTaskFrame;
  // Level 0 = the current task; only the frame pointer is requested here.
  ompt_get_task_info(0, NULL, (ompt_data_t**) NULL, &omptTaskFrame, NULL, NULL);
  printf("%" PRIu64 ":" _TOOL_PREFIX " ompt_event_control_tool: command=%" PRIu64
         ", modifier=%" PRIu64
         ", arg=%p, codeptr_ra=%p, current_task_frame.exit=%p, "
         "current_task_frame.reenter=%p \n",
         ompt_get_thread_data()->value, command, modifier, arg, codeptr_ra,
         omptTaskFrame->exit_frame.ptr, omptTaskFrame->enter_frame.ptr);
// the following would interfere with expected output for OMPT tests, so skip
#ifndef _OMPT_TESTS
  // print task data
  int task_level = 0;
  ompt_data_t *task_data;
  // Walk ancestor tasks until ompt_get_task_info() reports no more levels.
  while (ompt_get_task_info(task_level, NULL, (ompt_data_t **)&task_data, NULL,
                            NULL, NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " task level %d: task_id=%" PRIu64 "\n",
           ompt_get_thread_data()->value, task_level, task_data->value);
    task_level++;
  }
  // print parallel data
  int parallel_level = 0;
  ompt_data_t *parallel_data;
  // Walk enclosing parallel regions the same way.
  while (ompt_get_parallel_info(parallel_level, (ompt_data_t **)&parallel_data,
                                NULL)) {
    printf("%" PRIu64 ":" _TOOL_PREFIX " parallel level %d: parallel_id=%" PRIu64
           "\n",
           ompt_get_thread_data()->value, parallel_level, parallel_data->value);
    parallel_level++;
  }
#endif
  return 0; //success
}
// Callback: the runtime reported an error/warning (error directive).
// `length` is the size of `message`; it is widened to uint64_t to match
// the PRIu64 conversion since size_t's width is platform dependent.
static void on_ompt_callback_error(ompt_severity_t severity,
const char *message, size_t length,
const void *codeptr_ra) {
printf("%" PRIu64 ": ompt_event_runtime_error: severity=%" PRIu32
", message=%s, length=%" PRIu64 ", codeptr_ra=%p\n",
ompt_get_thread_data()->value, severity, message, (uint64_t)length,
codeptr_ra);
}
// Tool initializer, called by the OpenMP runtime after ompt_start_tool.
// Resolves every OMPT entry point through `lookup`, registers the event
// callbacks this tool prints, and returns 1 so the tool stays active.
int ompt_initialize(
ompt_function_lookup_t lookup,
int initial_device_num,
ompt_data_t *tool_data)
{
// resolve the OMPT runtime entry points used by the callbacks above
ompt_set_callback = (ompt_set_callback_t) lookup("ompt_set_callback");
ompt_get_callback = (ompt_get_callback_t) lookup("ompt_get_callback");
ompt_get_state = (ompt_get_state_t) lookup("ompt_get_state");
ompt_get_task_info = (ompt_get_task_info_t) lookup("ompt_get_task_info");
ompt_get_task_memory = (ompt_get_task_memory_t)lookup("ompt_get_task_memory");
ompt_get_thread_data = (ompt_get_thread_data_t) lookup("ompt_get_thread_data");
ompt_get_parallel_info = (ompt_get_parallel_info_t) lookup("ompt_get_parallel_info");
ompt_get_unique_id = (ompt_get_unique_id_t) lookup("ompt_get_unique_id");
ompt_finalize_tool = (ompt_finalize_tool_t)lookup("ompt_finalize_tool");
// NOTE(review): result discarded — presumably consumes one id so later
// ids are nonzero/stable for the test output; confirm against the tests.
ompt_get_unique_id();
ompt_get_num_procs = (ompt_get_num_procs_t) lookup("ompt_get_num_procs");
ompt_get_num_places = (ompt_get_num_places_t) lookup("ompt_get_num_places");
ompt_get_place_proc_ids = (ompt_get_place_proc_ids_t) lookup("ompt_get_place_proc_ids");
ompt_get_place_num = (ompt_get_place_num_t) lookup("ompt_get_place_num");
ompt_get_partition_place_nums = (ompt_get_partition_place_nums_t) lookup("ompt_get_partition_place_nums");
ompt_get_proc_id = (ompt_get_proc_id_t) lookup("ompt_get_proc_id");
ompt_enumerate_states = (ompt_enumerate_states_t) lookup("ompt_enumerate_states");
ompt_enumerate_mutex_impls = (ompt_enumerate_mutex_impls_t) lookup("ompt_enumerate_mutex_impls");
// register callbacks; the _t variants reuse one handler type for several
// related events (e.g. acquired/released share ompt_callback_mutex_t)
register_ompt_callback(ompt_callback_mutex_acquire);
register_ompt_callback_t(ompt_callback_mutex_acquired, ompt_callback_mutex_t);
register_ompt_callback_t(ompt_callback_mutex_released, ompt_callback_mutex_t);
register_ompt_callback(ompt_callback_nest_lock);
register_ompt_callback(ompt_callback_sync_region);
register_ompt_callback_t(ompt_callback_sync_region_wait, ompt_callback_sync_region_t);
register_ompt_callback_t(ompt_callback_reduction, ompt_callback_sync_region_t);
register_ompt_callback(ompt_callback_control_tool);
register_ompt_callback(ompt_callback_flush);
register_ompt_callback(ompt_callback_cancel);
register_ompt_callback(ompt_callback_implicit_task);
register_ompt_callback_t(ompt_callback_lock_init, ompt_callback_mutex_acquire_t);
register_ompt_callback_t(ompt_callback_lock_destroy, ompt_callback_mutex_t);
register_ompt_callback(ompt_callback_work);
register_ompt_callback(ompt_callback_masked);
register_ompt_callback(ompt_callback_parallel_begin);
register_ompt_callback(ompt_callback_parallel_end);
register_ompt_callback(ompt_callback_task_create);
register_ompt_callback(ompt_callback_task_schedule);
register_ompt_callback(ompt_callback_dependences);
register_ompt_callback(ompt_callback_task_dependence);
register_ompt_callback(ompt_callback_thread_begin);
register_ompt_callback(ompt_callback_thread_end);
register_ompt_callback(ompt_callback_error);
// reference line used by the test harness to compare pointer output
printf("0: NULL_POINTER=%p\n", (void*)NULL);
return 1; //success
}
// Tool finalizer: invoked once when the OpenMP runtime shuts down.
void ompt_finalize(ompt_data_t *tool_data)
{
printf("0: ompt_event_runtime_shutdown\n");
}
#ifdef __cplusplus
extern "C" {
#endif
// OMPT tool entry point, found by the runtime at startup. Returning a
// non-NULL result (with our init/fini hooks) activates the tool; the
// struct is static so the pointer stays valid after we return.
ompt_start_tool_result_t* ompt_start_tool(
unsigned int omp_version,
const char *runtime_version)
{
static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0};
return &ompt_start_tool_result;
}
#ifdef __cplusplus
}
#endif
#endif // ifndef USE_PRIVATE_TOOL
#ifdef _OMPT_TESTS
#undef _OMPT_TESTS
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two `struct timeval` values.
 *
 * NOTE: *y is used as scratch space and is modified by the call.
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Normalize y so that 0 <= x->tv_usec - y->tv_usec < 1000000. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec  += carry;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* After normalization the microsecond field cannot underflow. */
  result->tv_sec  = x->tv_sec  - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff the (normalized) seconds went negative. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point stencil.
 *
 * Usage: prog Nx Ny Nz Nt — interior extents in x/y/z plus time steps.
 * Runs the PLUTO/CLooG-tiled sweep TESTS times and reports the best time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;
  /* BUG FIX: Nx/Ny/Nz were read uninitialized when argc <= 3 and Nt when
   * argc <= 4; require all four arguments up front. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);
  /* double-buffered grid: A[t%2][z][y][x] */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 32;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;
  // initialize variables
  //
  srand(42);
  /* BUG FIX: loops previously started at 1, leaving the index-0 halo plane
   * of A[0] (which the stencil reads) uninitialized, and A[1]'s halo was
   * never written although it is read from the second time step onward.
   * Initialize both buffers over the full domain instead. */
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Auto-generated time/space-tiled sweep (PLUTO + CLooG); the t1..t8
     * loops enumerate tiles and points of the skewed iteration space. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,4);t1++) {
    lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8));
    ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) {
        for (t4=max(max(max(0,ceild(t1-255,256)),ceild(8*t2-Nz-1020,1024)),ceild(32*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(4*t1+Nx+5,1024)),floord(8*t2+Nx+4,1024)),floord(32*t3+Nx+28,1024)),floord(8*t1-8*t2+Nz+Nx+3,1024));t4++) {
          for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),1024*t4+1022),8*t1-8*t2+Nz+5);t5++) {
            for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                lbv=max(1024*t4,t5+1);
                ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
     for(j=0;j<Ny;j++){
     free(A[0][i][j]);
     free(A[1][i][j]);
     }
     free(A[0][i]);
     free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
   */
  return 0;
}
|
gravity.c | #include <string.h>
#include <mpi.h>
#include <fastpm/libfastpm.h>
#include <fastpm/prof.h>
#include <fastpm/transfer.h>
#include <fastpm/logging.h>
#include "pmpfft.h"
#include "pmghosts.h"
/* Fill `to` with the potential field of density `from` in Fourier space:
 * apply the Laplace transfer of the given finite-difference order, then
 * flip the overall sign. `from` and `to` may be the same array. */
static void
apply_pot_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, int order)
{
fastpm_apply_laplace_transfer(pm, from, to, order);
fastpm_apply_multiply_transfer(pm, to, to, -1);
}
/* Multiply `from` by i k[dir] in Fourier space and store into `to`,
 * producing the gradient along `dir`. `order` selects the naive (kiter.k)
 * or finite-difference (kiter.k_finite) wavenumber table. At the Nyquist
 * plane the operator is forced to zero so the real-space field stays real. */
static void
apply_grad_transfer(PM * pm, FastPMFloat * from, FastPMFloat * to, int dir, int order)
{
    /* no need to print these, since we will check for FFT fields with pm_check_values.*/
    if(0) {
        PMKIter kiter;
        pm_kiter_init(pm, &kiter);
        float ** klist[2] = {kiter.k, kiter.k_finite};
        int i;
        for(i = 0; i < pm_nmesh(pm)[dir]; i ++) {
            double k_finite = klist[order][dir][i];
            fastpm_info("fourier space kernel[%d] = %g\n", i, k_finite);
        }
    }
#pragma omp parallel
    {
        PMKIter kiter;
        ptrdiff_t * Nmesh = pm_nmesh(pm);
        /* BUG FIX: the iterator must be initialized before kiter.k /
         * kiter.k_finite are read; the previous code built klist from the
         * uninitialized struct, capturing garbage pointers. */
        pm_kiter_init(pm, &kiter);
        float ** klist[2] = {kiter.k, kiter.k_finite};
        for(;
            !pm_kiter_stop(&kiter);
            pm_kiter_next(&kiter)) {
            double k_finite;
            k_finite = klist[order][dir][kiter.iabs[dir]];
            ptrdiff_t ind = kiter.ind;
            /* i k[d] */
            /* Watch out the data dependency */
            if(
                kiter.iabs[0] == (Nmesh[0] - kiter.iabs[0]) % Nmesh[0] &&
                kiter.iabs[1] == (Nmesh[1] - kiter.iabs[1]) % Nmesh[1] &&
                kiter.iabs[2] == (Nmesh[2] - kiter.iabs[2]) % Nmesh[2]
            ) {
                /* We are at the nyquist and the diff operator shall be zero;
                 * otherwise the force is not real! */
                to[ind + 0] = 0;
                to[ind + 1] = 0;
            } else {
                /* (re, im) * (i k): tmp holds re before `to` may clobber it */
                FastPMFloat tmp = from[ind + 0] * (k_finite);
                to[ind + 0] = - from[ind + 1] * (k_finite);
                to[ind + 1] = tmp;
            }
        }
    }
}
/* Smooth `to` in place with a Gaussian of rms width N mesh cells:
 * each (complex) mode is scaled by prod_d exp(-0.5 (k_d r0)^2). */
static void
apply_gaussian_softening(PM * pm, FastPMFloat * from, FastPMFloat * to, double N)
{
/* N is rms in mesh size */
double r0 = N * pm->BoxSize[0] / pm->Nmesh[0];
#pragma omp parallel
{
PMKIter kiter;
int d;
int i;
double *kernel[3];
pm_kiter_init(pm, &kiter);
/* Precompute the 1-D separable kernel per axis.
 * NOTE(review): each thread rebuilds (and frees) its own copy of these
 * tables — redundant but harmless; confirm before sharing them. */
for(d = 0; d < 3; d ++) {
kernel[d] = malloc(sizeof(double) * pm->Nmesh[d]);
for(i = 0; i < pm->Nmesh[d]; i ++) {
kernel[d][i] = exp(- 0.5 * pow(kiter.k[d][i] * r0, 2));
}
}
for(;
!pm_kiter_stop(&kiter);
pm_kiter_next(&kiter)) {
int d;
double fac = 1;
ptrdiff_t ind = kiter.ind;
/* product of the three 1-D factors for this mode */
for(d = 0; d < 3; d++) {
fac *= kernel[d][kiter.iabs[d]];
}
/* scale real and imaginary parts alike */
to[ind + 0] *= fac;
to[ind + 1] *= fac;
}
for(d = 0; d < 3; d ++) {
free(kernel[d]);
}
}
}
static double
gaussian36(double k, double * knq)
{
double x = k / *knq;
return exp(- 36 * pow(x, 36));
}
/* Translate a force-kernel choice into the three transfer orders used by
 * gravity_apply_kernel_transfer: the Laplace (potential) order, the
 * gradient order, and how many de-convolution (decic) passes to apply.
 * Unknown types raise an error and leave the outputs untouched. */
void
fastpm_kernel_type_get_orders(FastPMKernelType type,
    int *potorder,
    int *gradorder,
    int *deconvolveorder)
{
    int pot, grad, dec;
    switch(type) {
        case FASTPM_KERNEL_EASTWOOD:
            /* now sharpen for mass assignment */
            /* L1 and L2*/
            pot = 0; grad = 0; dec = 2;
            break;
        case FASTPM_KERNEL_NAIVE:
            pot = 0; grad = 0; dec = 0;
            break;
        case FASTPM_KERNEL_GADGET:
            pot = 0; grad = 1; dec = 2;
            break;
        case FASTPM_KERNEL_1_4:
            pot = 0; grad = 1; dec = 0;
            break;
        case FASTPM_KERNEL_3_4:
            pot = 1; grad = 1; dec = 0;
            break;
        case FASTPM_KERNEL_5_4:
            pot = 2; grad = 1; dec = 0;
            break;
        case FASTPM_KERNEL_3_2:
            pot = 1; grad = 0; dec = 0;
            break;
        default:
            fastpm_raise(-1, "Wrong kernel type\n");
            /* matches the old control flow: outputs stay unmodified */
            return;
    }
    *potorder = pot;
    *gradorder = grad;
    *deconvolveorder = dec;
}
/* Apply the k-space transfer that turns the density `delta_k` into the
 * requested `field` (potential, density, a tidal-tensor component, or an
 * acceleration component), writing the result into `canvas`. */
void
gravity_apply_kernel_transfer(FastPMKernelType type,
        PM * pm,
        FastPMFloat * delta_k,
        FastPMFloat * canvas, FastPMFieldDescr field)
{
    int potorder, gradorder, deconvolveorder;
    fastpm_kernel_type_get_orders(type, &potorder, &gradorder, &deconvolveorder);
    /* NOTE(review): this deconvolution acts on `canvas`, yet every branch
     * below overwrites `canvas` from `delta_k`, so it appears to have no
     * effect; confirm upstream whether `delta_k` was the intended operand.
     * Original ordering preserved here. */
    while(deconvolveorder > 0) {
        fastpm_apply_decic_transfer(pm, canvas, canvas);
        deconvolveorder--;
    }
    int d1, d2;
    switch(field.attribute) {
        case COLUMN_POTENTIAL:
            apply_pot_transfer(pm, delta_k, canvas, potorder);
            break;
        case COLUMN_DENSITY:
            fastpm_apply_multiply_transfer(pm, delta_k, canvas, 1.0);
            break;
        case COLUMN_TIDAL: {
            /* memb 0..5 selects the (d1, d2) pair of the symmetric tidal
             * tensor: xx, yy, zz, xy, yz, zx. The table replaces six
             * previously duplicated case bodies. */
            static const int tidal_d1[6] = { 0, 1, 2, 0, 1, 2 };
            static const int tidal_d2[6] = { 0, 1, 2, 1, 2, 0 };
            if(field.memb >= 0 && field.memb < 6) {
                d1 = tidal_d1[field.memb];
                d2 = tidal_d2[field.memb];
                apply_pot_transfer(pm, delta_k, canvas, potorder);
                apply_grad_transfer(pm, canvas, canvas, d1, gradorder);
                apply_grad_transfer(pm, canvas, canvas, d2, gradorder);
            }
            break;
        }
        case COLUMN_ACC:
            /* potential followed by one gradient along field.memb */
            apply_pot_transfer(pm, delta_k, canvas, potorder);
            apply_grad_transfer(pm, canvas, canvas, field.memb, gradorder);
            break;
        default:
            fastpm_raise(-1, "Unknown type for gravity attribute\n");
    }
}
/* Apply the selected long-range softening / anti-aliasing filter to `from`,
 * storing the result in `to`. FASTPM_SOFTENING_NONE leaves `to` untouched. */
static void
apply_softening_transfer(FastPMSofteningType type, PM * pm, FastPMFloat * from, FastPMFloat * to)
{
    /* Nyquist wavenumber of the mesh along direction 0; hoisted because two
     * branches use it and it is pure arithmetic on pm. */
    double k_nq = M_PI / pm->BoxSize[0] * pm->Nmesh[0];
    switch(type) {
        case FASTPM_SOFTENING_TWO_THIRD:
            fastpm_apply_lowpass_transfer(pm, from, to, 2.0 / 3 * k_nq);
            break;
        case FASTPM_SOFTENING_GAUSSIAN:
            apply_gaussian_softening(pm, from, to, 1.0);
            break;
        case FASTPM_SOFTENING_GADGET_LONG_RANGE:
            apply_gaussian_softening(pm, from, to, pow(2,0.5)*1.25);
            break;
        case FASTPM_SOFTENING_GAUSSIAN36:
            fastpm_apply_any_transfer(pm, from, to, (fastpm_fkfunc) gaussian36, &k_nq);
            break;
        case FASTPM_SOFTENING_NONE:
            break;
        default:
            fastpm_raise(-1, "wrong softening kernel type");
    }
}
/* Create ghost-particle buffers for every active species and ship the
 * columns the force step needs (positions, ids, and mass when present)
 * to the ranks that own the overlapping mesh regions. `support` is the
 * painter's kernel support, which sets the ghost layer width. */
void
_fastpm_solver_create_ghosts(FastPMSolver * fastpm, int support, PMGhostData * pgd[6])
{
PM * pm = fastpm->pm;
CLOCK(ghosts);
int si;
for(si = 0; si < FASTPM_SOLVER_NSPECIES; si++) {
FastPMStore * p = fastpm_solver_get_species(fastpm, si);
if(!p) continue;
pgd[si] = pm_ghosts_create(pm, p, p->attributes, support);
pm_ghosts_send(pgd[si], COLUMN_POS);
pm_ghosts_send(pgd[si], COLUMN_ID);
/* mass column is optional; only send it when the store carries one */
if(p->mass)
pm_ghosts_send(pgd[si], COLUMN_MASS);
}
LEAVE(ghosts);
}
/* Release the ghost buffers made by _fastpm_solver_create_ghosts,
 * walking the species in reverse creation order. */
void
_fastpm_solver_destroy_ghosts(FastPMSolver * fastpm, PMGhostData * pgd[6])
{
    int si = FASTPM_SOLVER_NSPECIES;
    while(si-- > 0) {
        FastPMStore * p = fastpm_solver_get_species(fastpm, si);
        if(p == NULL) continue;
        pm_ghosts_free(pgd[si]);
    }
}
/* Paint all species (plus their ghosts) onto `canvas`, normalize to a
 * matter overdensity, and FFT into `delta_k`. */
void
_fastpm_solver_compute_delta_k(FastPMSolver * fastpm, FastPMPainter * painter, PMGhostData * pgd[6], FastPMFloat * canvas, FastPMFloat * delta_k)
{
PM * pm = fastpm->pm;
double total_mass = 0;
FastPMFieldDescr FASTPM_FIELD_DESCR_NONE = {0, 0};
pm_clear(pm, canvas);
/* Watch out: paint paints the mass per cell;
* divide by mean mass per cell to convert to matter overdensity, which
* goes into Poisson's equation.
*
* In this perspective, we are still operating with the dimension-less
* Poisson's equation where the critical density factors canceled
* with gravity constants into 1.5 OmegaM,
* */
CLOCK(paint);
int si;
for(si = 0; si < FASTPM_SOLVER_NSPECIES; si++) {
FastPMStore * p = fastpm_solver_get_species(fastpm, si);
if(!p) continue;
VALGRIND_CHECK_MEM_IS_DEFINED(p->x, sizeof(p->x[0]) * p->np);
VALGRIND_CHECK_MEM_IS_DEFINED(pgd[si]->p->x, sizeof(pgd[si]->p->x[0]) * pgd[si]->p->np);
/* accumulate the local mass of this species before painting */
double total_mass1 = 0;
ptrdiff_t i;
for (i = 0; i < p->np; i ++){
total_mass1 += fastpm_store_get_mass(p, i);
}
total_mass += total_mass1;
/* paint local particles, then the ghosts received from other ranks */
fastpm_paint_local(painter, canvas, p, p->np, FASTPM_FIELD_DESCR_NONE);
fastpm_paint_local(painter, canvas, pgd[si]->p, pgd[si]->p->np, FASTPM_FIELD_DESCR_NONE);
}
LEAVE(paint);
/* global mass across all ranks; pm->Norm cells in the full mesh */
MPI_Allreduce(MPI_IN_PLACE, &total_mass, 1, MPI_DOUBLE, MPI_SUM, fastpm->comm);
double mean_mass_per_cell = total_mass / pm->Norm;
CLOCK(transfer);
fastpm_apply_multiply_transfer(pm, canvas, canvas, 1.0 / mean_mass_per_cell);
LEAVE(transfer);
CLOCK(r2c);
pm_check_values(pm, canvas, "After painting");
pm_r2c(pm, canvas, delta_k);
pm_check_values(pm, delta_k, "After r2c");
LEAVE(r2c);
}
/* For each requested component in ACC (accelerations and optionally the
 * potential): apply the kernel transfer to delta_k, inverse-FFT the canvas,
 * read it back onto particles and ghosts, then fold the ghost contributions
 * back into the owning ranks. Diagnostics summarize the ACC column before
 * and after the ghost reduction. */
void
_fastpm_solver_compute_force(FastPMSolver * fastpm,
        FastPMPainter * reader,
        FastPMKernelType kernel,
        PMGhostData * pgd[6],
        FastPMFloat * canvas,
        FastPMFloat * delta_k, FastPMFieldDescr * ACC, int nacc)
{
    int d;
    PM * pm = fastpm->pm;
    CLOCK(transfer);
    CLOCK(c2r);
    CLOCK(readout);
    CLOCK(reduce);
    for(d = 0; d < nacc; d ++) {
        ENTER(transfer);
        gravity_apply_kernel_transfer(kernel, pm, delta_k, canvas, ACC[d]);
        LEAVE(transfer);
        ENTER(c2r);
        pm_check_values(pm, canvas, "Before c2r %d", d);
        pm_c2r(pm, canvas);
        pm_check_values(pm, canvas, "After c2r %d", d);
        LEAVE(c2r);
        ENTER(readout);
        int si;
        for(si = 0; si < FASTPM_SOLVER_NSPECIES; si ++) {
            FastPMStore * p = fastpm_solver_get_species(fastpm, si);
            if(!p) continue;
            fastpm_readout_local(reader, canvas, p, p->np, ACC[d]);
            fastpm_readout_local(reader, canvas, pgd[si]->p, pgd[si]->p->np, ACC[d]);
        }
        LEAVE(readout);
    }
    int si;
    for(si = 0; si < FASTPM_SOLVER_NSPECIES; si ++) {
        FastPMStore * p = fastpm_solver_get_species(fastpm, si);
        if(!p) continue;
        double acc_std[3], acc_mean[3], acc_min[3], acc_max[3];
        /* local particle contribution only */
        fastpm_store_summary(p, COLUMN_ACC, pm_comm(pm), "<s->", acc_min, acc_std, acc_mean, acc_max);
        for(d = 0; d < 3; d ++) {
            fastpm_info("p%s acc[%d]: %g %g %g %g\n",
                p->name, d, acc_min[d], acc_std[d], acc_mean[d], acc_max[d]);
        }
        /* ghost-only contribution */
        fastpm_store_summary(pgd[si]->p, COLUMN_ACC, pm_comm(pm), "<s->", acc_min, acc_std, acc_mean, acc_max);
        for(d = 0; d < 3; d ++) {
            fastpm_info("ghost acc[%d]: %g %g %g %g\n",
                d, acc_min[d], acc_std[d], acc_mean[d], acc_max[d]);
        }
        ENTER(reduce);
        pm_ghosts_reduce(pgd[si], COLUMN_ACC, FastPMReduceAddFloat, NULL);
        if(p->potential != NULL) {
            pm_ghosts_reduce(pgd[si], COLUMN_POTENTIAL, FastPMReduceAddFloat, NULL);
        }
        LEAVE(reduce);
        /* BUG FIX: this summary is labelled "p+g" (particles plus ghosts),
         * so it must run after the ghost reduction; previously it printed
         * before the reduction and merely duplicated the first summary. */
        fastpm_store_summary(p, COLUMN_ACC, pm_comm(pm), "<s->", acc_min, acc_std, acc_mean, acc_max);
        for(d = 0; d < 3; d ++) {
            fastpm_info("p%s+g acc[%d]: %g %g %g %g\n",
                p->name, d, acc_min[d], acc_std[d], acc_mean[d], acc_max[d]);
        }
    }
}
/* Top-level force step: build ghosts, paint and FFT the density, soften
 * delta_k in place, then compute accelerations (and potential when the CDM
 * store has a potential column) for every species. */
void
fastpm_solver_compute_force(FastPMSolver * fastpm,
FastPMPainter * painter,
FastPMSofteningType dealias,
FastPMKernelType kernel,
FastPMFloat * delta_k)
{
PM * pm = fastpm->pm;
PMGhostData * pgd[FASTPM_SOLVER_NSPECIES];
FastPMFloat * canvas = pm_alloc(pm);
_fastpm_solver_create_ghosts(fastpm, painter->support, pgd);
_fastpm_solver_compute_delta_k(fastpm, painter, pgd, canvas, delta_k);
CLOCK(dealias);
/* calculate the forces save them to p->acc */
apply_softening_transfer(dealias, pm, delta_k, delta_k);
pm_check_values(pm, delta_k, "After softening");
LEAVE(dealias);
/* the three acceleration components, plus the potential as a 4th pass */
FastPMFieldDescr ACC[] = {
{COLUMN_ACC, 0},
{COLUMN_ACC, 1},
{COLUMN_ACC, 2},
{COLUMN_POTENTIAL, 0}
};
int nacc = 4;
/* skip potential if not wanted */
if(NULL == fastpm_solver_get_species(fastpm, FASTPM_SPECIES_CDM)->potential) {
nacc = 3;
}
_fastpm_solver_compute_force(fastpm, painter, kernel, pgd, canvas, delta_k, ACC, nacc);
_fastpm_solver_destroy_ghosts(fastpm, pgd);
pm_free(pm, canvas);
}
|
mttkrp.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "base.h"
#include "mttkrp.h"
#include "thd_info.h"
#include "tile.h"
#include "util.h"
#include "mutex_pool.h"
/* Statically allocated output-matrix shell used by p_schedule_tiles in
 * place of a per-thread splatt_malloc (see the commented-out calls there). */
matrix_t p_schedule_tiles_malloc1;
/* Statically initialized mutex pools; `locks` starts NULL and is expected
 * to be populated before any locking is enabled (locking is currently
 * commented out in this serialized build). */
mutex_pool mutex_pool1 = { .num_locks = SPLATT_DEFAULT_NLOCKS, .pad_size = SPLATT_DEFAULT_LOCK_PAD, .locks = NULL};
mutex_pool mutex_pool2 = { .num_locks = SPLATT_DEFAULT_NLOCKS, .pad_size = SPLATT_DEFAULT_LOCK_PAD, .locks = NULL};
/* XXX: this is a memory leak until cpd_ws is added/freed. */
static mutex_pool * pool = NULL;
/**
* @brief Function pointer that performs MTTKRP on a tile of a CSF tree.
*
* @param ct The CSF tensor.
* @param tile_id The tile to process.
* @param mats The matrices.
* @param mode The output mode.
* @param thds Thread structures.
* @param partition A partitioning of the slices in the tensor, to distribute
* to threads. Use the thread ID to decide which slices to
* process. This may be NULL, in that case simply process all
* slices.
*/
typedef void (* csf_mttkrp_func)(
splatt_csf const * const ct,
idx_t const tile_id,
matrix_t ** mats,
idx_t const mode,
thd_info * const thds,
idx_t const * const partition);
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Perform a reduction on thread-local MTTKRP outputs.
*
* @param ws MTTKRP workspace containing thread-local outputs.
* @param global_output The global MTTKRP output we are reducing into.
* @param nrows The number of rows in the MTTKRP.
* @param ncols The number of columns in the MTTKRP.
*/
/* Reduce the thread-private MTTKRP buffers into the global output.
 * NOTE: this build is serialized — the OpenMP pragmas are commented out,
 * tid is pinned to 0 and num_threads to 1, so the single "thread" covers
 * the whole [0, nrows*ncols) range and only buffer 0 is accumulated. */
static void p_reduce_privatized(
splatt_mttkrp_ws * const ws,
val_t * const restrict global_output,
idx_t const nrows,
idx_t const ncols)
{
/* Ensure everyone has completed their local MTTKRP. */
// #pragma omp barrier
sp_timer_t reduction_timer;
timer_fstart(&reduction_timer);
int const tid = 0; //splatt_omp_get_thread_num();
idx_t const num_threads = 1; //splatt_omp_get_num_threads();
/* each thread reduces a contiguous chunk; the last thread absorbs the
 * remainder of the integer division */
idx_t const elem_per_thread = (nrows * ncols) / num_threads;
idx_t const start = tid * elem_per_thread;
idx_t const stop = ((idx_t)tid == num_threads-1) ?
(nrows * ncols) : (tid + 1) * elem_per_thread;
/* reduction */
for(idx_t t=0; t < num_threads; ++t){
val_t const * const restrict thread_buf = ws->privatize_buffer[t];
for(idx_t x=start; x < stop; ++x) {
global_output[x] += thread_buf[x];
}
}
timer_stop(&reduction_timer);
// #pragma omp master
ws->reduction_time = reduction_timer.seconds;
}
/**
* @brief Map MTTKRP functions onto a (possibly tiled) CSF tensor. This function
* will handle any scheduling required with a partially tiled tensor.
*
* @param tensors An array of CSF representations. tensors[csf_id] is processed.
* @param csf_id Which tensor are we processing?
* @param atomic_func An MTTKRP function which atomically updates the output.
* @param nosync_func An MTTKRP function which does not atomically update.
* @param mats The matrices, with the output stored in mats[MAX_NMODES].
* @param mode Which mode of 'tensors' is the output (not CSF depth).
* @param thds Thread structures.
* @param ws MTTKRP workspace.
*/
static void p_schedule_tiles(
splatt_csf const * const tensors,
idx_t const csf_id,
csf_mttkrp_func atomic_func,
csf_mttkrp_func nosync_func,
matrix_t ** mats,
idx_t const mode,
thd_info * const thds,
splatt_mttkrp_ws * const ws)
{
splatt_csf const * const csf = &(tensors[csf_id]);
idx_t const nmodes = csf->nmodes;
idx_t const depth = nmodes - 1;
idx_t const nrows = mats[mode]->I;
idx_t const ncols = mats[mode]->J;
/* Store old pointer */
val_t * const restrict global_output = mats[MAX_NMODES]->vals;
/* NOTE: serialized build — the parallel region is commented out and tid
 * is pinned to 0 throughout. */
// #pragma omp parallel
{
int const tid = 0; //splatt_omp_get_thread_num();
timer_start(&thds[tid].ttime);
idx_t const * const tile_partition = ws->tile_partition[csf_id];
idx_t const * const tree_partition = ws->tree_partition[csf_id];
/*
* We may need to edit mats[MAX_NMODES]->vals, so create a private copy of
* the pointers to edit. (NOT actual factors).
*/
matrix_t * mats_priv[MAX_NMODES+1];
for(idx_t m=0; m < MAX_NMODES; ++m) {
mats_priv[m] = mats[m];
}
/* each thread gets separate structure, but do a shallow copy */
/* uses the static shell instead of a heap allocation (see the matching
 * commented-out splatt_free below) */
mats_priv[MAX_NMODES] = &p_schedule_tiles_malloc1;//splatt_malloc(sizeof(**mats_priv));
*(mats_priv[MAX_NMODES]) = *(mats[MAX_NMODES]);
/* Give each thread its own private buffer and overwrite atomic
* function. */
if(ws->is_privatized[mode]) {
/* change (thread-private!) output structure */
memset(ws->privatize_buffer[tid], 0,
nrows * ncols * sizeof(**(ws->privatize_buffer)));
mats_priv[MAX_NMODES]->vals = ws->privatize_buffer[tid];
/* Don't use atomics if we privatized. */
atomic_func = nosync_func;
}
/*
* Distribute tiles to threads in some fashion.
*/
if(csf->ntiles > 1) {
/* We parallelize across tiles, and thus should not distribute within a
* tree. This may change if we instead 'split' tiles across a few
* threads. */
assert(tree_partition == NULL);
/* mode is actually tiled -- avoid synchronization */
if(csf->tile_dims[mode] > 1) {
idx_t tile_id = 0;
/* foreach layer of tiles */
// #pragma omp for schedule(dynamic, 1) nowait
for(idx_t t=0; t < csf->tile_dims[mode]; ++t) {
/* walk the chain of tiles in this layer via get_next_tileid */
tile_id =
get_next_tileid(TILE_BEGIN, csf->tile_dims, nmodes, mode, t);
while(tile_id != TILE_END) {
nosync_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
tile_id =
get_next_tileid(tile_id, csf->tile_dims, nmodes, mode, t);
}
}
/* tiled, but not this mode. Atomics are still necessary. */
} else {
for(idx_t tile_id = tile_partition[tid];
tile_id < tile_partition[tid+1]; ++tile_id) {
atomic_func(csf, tile_id, mats_priv, mode, thds, tree_partition);
}
}
/*
* Untiled, parallelize within kernel.
*/
} else {
assert(tree_partition != NULL);
atomic_func(csf, 0, mats_priv, mode, thds, tree_partition);
}
timer_stop(&thds[tid].ttime);
/* If we used privatization, perform a reduction. */
if(ws->is_privatized[mode]) {
p_reduce_privatized(ws, global_output, nrows, ncols);
}
//splatt_free(mats_priv[MAX_NMODES]);
} /* end omp parallel */
/* restore pointer */
mats[MAX_NMODES]->vals = global_output;
}
/**
* @brief Should a certain mode should be privatized to avoid locks?
*
* @param csf The tensor (just used for dimensions).
* @param mode The mode we are processing.
* @param opts Options, storing the # threads and the threshold.
*
* @return true, if we should privatize.
*/
/* Decide whether a mode's output should be privatized (one buffer per
 * thread, reduced afterwards) instead of updated with locks: privatize
 * when length * nthreads is small relative to thresh * nnz. */
static bool p_is_privatized(
splatt_csf const * const csf,
idx_t const mode,
double const * const opts)
{
idx_t const length = csf->dims[mode];
idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
double const thresh = opts[SPLATT_OPTION_PRIVTHRESH];
/* don't bother if it is not multithreaded. */
if(nthreads == 1) {
return false;
}
/* BUG FIX: multiply in floating point — the previous `length * nthreads`
 * could overflow idx_t for very large modes before the cast. */
return ((double) length * (double) nthreads) <= (thresh * (double)csf->nnz);
}
/* out += a (*) b (elementwise Hadamard product), then zero `a` so the
 * caller can reuse it as a fresh accumulation buffer. `out` and `a` are
 * restrict-qualified, so the two passes touch disjoint memory. */
static inline void p_add_hada_clear(
  val_t * const restrict out,
  val_t * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t r=0; r < nfactors; ++r) {
    out[r] += a[r] * b[r];
  }
  for(idx_t r=0; r < nfactors; ++r) {
    a[r] = 0;
  }
}
/* out = a (*) b, the elementwise (Hadamard) product over nfactors entries. */
static inline void p_assign_hada(
  val_t * const restrict out,
  val_t const * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  idx_t r;
  for(r=0; r < nfactors; ++r) {
    out[r] = a[r] * b[r];
  }
}
/* Scatter `accumbuf`, scaled by each nonzero value, into rows of the leaf
 * factor matrix: leafmat[inds[jj], :] += vals[jj] * accumbuf.
 * NOTE(review): despite the _locked name, the mutex calls are commented
 * out (serialized build), making this identical to the _nolock variant;
 * restore mutex_set_lock/mutex_unset_lock before re-enabling threads. */
static inline void p_csf_process_fiber_locked(
val_t * const leafmat,
val_t const * const restrict accumbuf,
idx_t const nfactors,
idx_t const start,
idx_t const end,
idx_t const * const restrict inds,
val_t const * const restrict vals)
{
for(idx_t jj=start; jj < end; ++jj) {
val_t * const restrict leafrow = leafmat + (inds[jj] * nfactors);
val_t const v = vals[jj];
//mutex_set_lock(pool, inds[jj]);
for(idx_t f=0; f < nfactors; ++f) {
leafrow[f] += v * accumbuf[f];
}
//mutex_unset_lock(pool, inds[jj]);
}
}
/* Scatter `accumbuf`, scaled by each nonzero, into rows of the leaf factor
 * matrix without any synchronization:
 *   leafmat[inds[nz], :] += vals[nz] * accumbuf  for nz in [start, end). */
static inline void p_csf_process_fiber_nolock(
  val_t * const leafmat,
  val_t const * const restrict accumbuf,
  idx_t const nfactors,
  idx_t const start,
  idx_t const end,
  idx_t const * const restrict inds,
  val_t const * const restrict vals)
{
  for(idx_t nz=start; nz < end; ++nz) {
    val_t const scale = vals[nz];
    val_t * const restrict outrow = leafmat + (inds[nz] * nfactors);
    for(idx_t r=0; r < nfactors; ++r) {
      outrow[r] += scale * accumbuf[r];
    }
  }
}
/* Gather direction: accumulate rows of the leaf factor matrix into
 * `accumbuf`, each scaled by its nonzero value:
 *   accumbuf += vals[nz] * leafmat[inds[nz], :]  for nz in [start, end). */
static inline void p_csf_process_fiber(
  val_t * const restrict accumbuf,
  idx_t const nfactors,
  val_t const * const leafmat,
  idx_t const start,
  idx_t const end,
  idx_t const * const inds,
  val_t const * const vals)
{
  /* foreach nnz in fiber */
  for(idx_t nz=start; nz < end; ++nz) {
    val_t const scale = vals[nz];
    val_t const * const restrict srcrow = leafmat + (nfactors * inds[nz]);
    for(idx_t r=0; r < nfactors; ++r) {
      accumbuf[r] += scale * srcrow[r];
    }
  }
}
/* Depth-first accumulation of a subtree of the CSF structure rooted at
 * (init_depth, init_idx): processes every leaf fiber underneath, multiplies
 * partial results by the matching factor rows on the way back up, and
 * writes the subtree's Hadamard-accumulated result into `out`.
 * `idxstack` tracks the current child index per depth; `buf[d]` holds the
 * partial product for depth d. */
static inline void p_propagate_up(
val_t * const out,
val_t * const * const buf,
idx_t * const restrict idxstack,
idx_t const init_depth,
idx_t const init_idx,
idx_t const * const * const fp,
idx_t const * const * const fids,
val_t const * const restrict vals,
val_t ** mvals,
idx_t const nmodes,
idx_t const nfactors)
{
/* push initial idx initialize idxstack */
idxstack[init_depth] = init_idx;
for(idx_t m=init_depth+1; m < nmodes; ++m) {
idxstack[m] = fp[m-1][idxstack[m-1]];
}
assert(init_depth < nmodes-1);
/* clear out accumulation buffer */
for(idx_t f=0; f < nfactors; ++f) {
buf[init_depth+1][f] = 0;
}
/* DFS: loop until every child of the root node has been consumed */
while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
/* skip to last internal mode */
idx_t depth = nmodes - 2;
/* process all nonzeros [start, end) into buf[depth]*/
idx_t const start = fp[depth][idxstack[depth]];
idx_t const end = fp[depth][idxstack[depth]+1];
p_csf_process_fiber(buf[depth+1], nfactors, mvals[depth+1],
start, end, fids[depth+1], vals);
idxstack[depth+1] = end;
/* exit early if there is no propagation to do... */
if(init_depth == nmodes-2) {
for(idx_t f=0; f < nfactors; ++f) {
out[f] = buf[depth+1][f];
}
return;
}
/* Propagate up until we reach a node with more children to process */
do {
/* propagate result up and clear buffer for next sibling */
val_t const * const restrict fibrow
= mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);
++idxstack[depth];
--depth;
} while(depth > init_depth &&
idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
} /* end DFS */
/* copy to out */
for(idx_t f=0; f < nfactors; ++f) {
out[f] = buf[init_depth+1][f];
}
}
/*
 * MTTKRP for a 3-mode CSF tile where the output mode is the root (depth 0).
 * No locking: each slice owns its output row exclusively within this call.
 * Output rows (mats[MAX_NMODES]) are accumulated into, not overwritten.
 * `partition`, when non-NULL, restricts the slice range for thread `tid`.
 */
static void p_csf_mttkrp_root3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slice ptr/ids, fiber ptr/ids, nonzero ids + values */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at depths 1 and 2; output matrix lives at MAX_NMODES */
  val_t const * const amat = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bmat = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const omat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* per-slice accumulation buffer, flushed to the output row per slice */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }

  /* slice range for this thread */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const s_begin = (partition == NULL) ? 0 : partition[tid];
  idx_t const s_end   = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t s=s_begin; s < s_end; ++s) {
    idx_t const orow_id = (slice_ids == NULL) ? s : slice_ids[s];
    val_t * const restrict orow = omat + (orow_id * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=slice_ptr[s]; f < slice_ptr[s+1]; ++f) {
      /* seed accumF with the fiber's first nonzero */
      idx_t const nz0 = fib_ptr[f];
      val_t const * const restrict brow0 = bmat + (nnz_ids[nz0] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = tvals[nz0] * brow0[r];
      }
      /* fold in the remaining nonzeros of this fiber */
      for(idx_t nz=nz0+1; nz < fib_ptr[f+1]; ++nz) {
        val_t const * const restrict brow = bmat + (nnz_ids[nz] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += tvals[nz] * brow[r];
        }
      }
      /* scale inner products by the A row and buffer into writeF */
      val_t const * const restrict arow = amat + (fib_ids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * arow[r];
      }
    } /* foreach fiber */

    /* flush slice buffer into the output row and reset it */
    for(idx_t r=0; r < nfactors; ++r) {
      orow[r] += writeF[r];
      writeF[r] = 0.;
    }
  } /* foreach slice (tree) */
}
/*
 * MTTKRP for a 3-mode CSF tile with the output mode at the root (depth 0),
 * intended for use when distinct threads may target the same output row.
 * NOTE(review): the mutex calls guarding the output flush are commented out
 * below, so as written this is only safe single-threaded (tid is also
 * hard-wired to 0) — confirm before re-enabling parallel scheduling.
 */
static void p_csf_mttkrp_root3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  /* CSF tile: slice ptr/ids, fiber ptr/ids, nonzero column ids and values */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];
  /* factor matrices at depths 1 and 2; output matrix at MAX_NMODES */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;
  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
  /* write to output */
  val_t * const restrict writeF = (val_t *) thds[tid].scratch[2];
  for(idx_t r=0; r < nfactors; ++r) {
    writeF[r] = 0.;
  }
  /* slice range for this thread (whole tile when partition is NULL) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }
      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }
      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        writeF[r] += accumF[r] * av[r];
      }
    }
    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);
    /* flush to output (this is the section a per-row lock would protect) */
    //mutex_set_lock(pool, fid);
    for(idx_t r=0; r < nfactors; ++r) {
      mv[r] += writeF[r];
      writeF[r] = 0.;
    }
    // mutex_unset_lock(pool, fid);
  }
}
/*
 * MTTKRP for a 3-mode CSF tile with the output mode at depth 1 (internal).
 * Each fiber contributes to output row fids[f]: the fiber's inner products
 * (accumF) scaled elementwise by the root's factor row.
 * NOTE(review): the per-row mutex calls are commented out, so this variant
 * is currently only safe single-threaded (tid is hard-wired to 0).
 */
static void p_csf_mttkrp_intl3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  /* CSF tile: slice ptr/ids, fiber ptr/ids, nonzero column ids and values */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];
  /* factor matrices at depths 0 (root) and 2 (leaf); output at MAX_NMODES */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;
  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
  /* slice range for this thread (whole tile when partition is NULL) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];
    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }
      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }
      /* write to fiber row (would be lock-protected in parallel runs) */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      // mutex_set_lock(pool, fids[f]);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
      // mutex_unset_lock(pool, fids[f]);
    }
  }
}
/*
 * MTTKRP for a 3-mode CSF tile with the output mode at the leaf (depth 2).
 * Each nonzero scatters v * (root row ⊙ fiber row) into its output row.
 * NOTE(review): the per-row mutex calls are commented out, so this variant
 * is currently only safe single-threaded (tid is hard-wired to 0).
 */
static void p_csf_mttkrp_leaf3_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  assert(ct->nmodes == 3);
  /* CSF tile: slice ptr/ids, fiber ptr/ids, nonzero column ids and values */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];
  /* factor matrices at depths 0 and 1; output at MAX_NMODES */
  val_t const * const avals = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bvals = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;
  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
  /* slice range for this thread (whole tile when partition is NULL) */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];
    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);
    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }
      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        // mutex_set_lock(pool, inds[jj]);
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
        // mutex_unset_lock(pool, inds[jj]);
      }
    }
  }
}
/*
 * General n-mode MTTKRP with the output mode at the CSF root, lock-free.
 * Dispatches to the specialized 3-mode kernel when possible; otherwise each
 * outer slice's subtree is folded upward via p_propagate_up and the result
 * is accumulated into that slice's output row.
 */
static void p_csf_mttkrp_root_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;
  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_root3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);
  /* break up loop by partition */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);
    /* fold this slice's entire subtree into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);
    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    // mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    // mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}
/*
 * General n-mode MTTKRP with the output mode at the CSF root, "locked"
 * variant for schedules where output rows may be shared across threads.
 * NOTE(review): the mutex calls around the output update are commented out
 * (and tid is hard-wired to 0), so the body is currently identical in
 * effect to the nolock variant — confirm before parallel use.
 */
static void p_csf_mttkrp_root_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;
  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_root3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);
  /* slice range for this thread (whole tile when partition is NULL) */
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nfibs;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    assert(fid < mats[MAX_NMODES]->I);
    /* fold this slice's entire subtree into buf[0] */
    p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids,
        vals, mvals, nmodes, nfactors);
    val_t * const restrict orow = ovals + (fid * nfactors);
    val_t const * const restrict obuf = buf[0];
    // mutex_set_lock(pool, fid);
    for(idx_t f=0; f < nfactors; ++f) {
      orow[f] += obuf[f];
    }
    // mutex_unset_lock(pool, fid);
  } /* end foreach outer slice */
}
/*
 * MTTKRP for a 3-mode CSF tile where the output mode is the leaf (depth 2).
 * No locking: safe when output rows are not shared between threads.
 * For each fiber, the Hadamard product of the root's factor row and the
 * fiber's factor row is formed once, then scattered (scaled by each nonzero
 * value) into the nonzero's output row.
 */
static void p_csf_mttkrp_leaf3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slice ptr/ids, fiber ptr/ids, nonzero ids + values */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at depths 0 and 1; output matrix lives at MAX_NMODES */
  val_t const * const amat = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bmat = mats[csf_depth_to_mode(ct, 1)]->vals;
  val_t * const omat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* slice range for this thread */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const s_begin = (partition == NULL) ? 0 : partition[tid];
  idx_t const s_end   = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t s=s_begin; s < s_end; ++s) {
    idx_t const sid = (slice_ids == NULL) ? s : slice_ids[s];
    /* root factor row for this slice */
    val_t const * const restrict rootrow = amat + (sid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=slice_ptr[s]; f < slice_ptr[s+1]; ++f) {
      /* hadamard of root row and fiber row */
      val_t const * const restrict fibrow = bmat + (fib_ids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rootrow[r] * fibrow[r];
      }
      /* scatter the scaled hadamard into each nonzero's output row */
      for(idx_t nz=fib_ptr[f]; nz < fib_ptr[f+1]; ++nz) {
        val_t * const restrict orow = omat + (nnz_ids[nz] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          orow[r] += tvals[nz] * accumF[r];
        }
      }
    }
  }
}
/*
 * General n-mode MTTKRP with the output mode at the CSF leaf, lock-free.
 * Dispatches to the 3-mode kernel when possible. Otherwise performs a DFS
 * over each outer slice: factor rows are Hadamard-multiplied downward into
 * per-depth buffers, and leaf fibers are scattered into the output by
 * p_csf_process_fiber_nolock.
 */
static void p_csf_mttkrp_leaf_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;
  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  /* extract tensor structures */
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }
  /* foreach outer slice */
  idx_t const nouter = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nouter;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;
    /* clear out stale data: point each depth at its first child */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }
    /* first buf will always just be a matrix row */
    val_t const * const rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }
    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    /* DFS until every fiber under this slice has been processed */
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }
      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_nolock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);
      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
/*
 * General n-mode MTTKRP with the output mode at the CSF leaf, "locked"
 * variant: leaf scatter goes through p_csf_process_fiber_locked instead of
 * the nolock version. Otherwise identical DFS structure to
 * p_csf_mttkrp_leaf_nolock. tid is hard-wired to 0 in this build.
 */
static void p_csf_mttkrp_leaf_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const restrict partition)
{
  /* extract tensor structures */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;
  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_leaf3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }
  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;
    /* clear out stale data: point each depth at its first child */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }
    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }
    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    /* DFS until every fiber under this slice has been processed */
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }
      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_locked(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);
      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}
/*
 * MTTKRP for a 3-mode CSF tile where the output mode sits at depth 1
 * (the internal level). No locking: safe when output rows are private.
 * Each fiber's inner products with the leaf factor rows (accumF) are scaled
 * elementwise by the slice's root factor row and added to output row
 * fib_ids[f].
 */
static void p_csf_mttkrp_intl3_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  assert(ct->nmodes == 3);

  /* CSF tile structure: slice ptr/ids, fiber ptr/ids, nonzero ids + values */
  val_t const * const tvals = ct->pt[tile_id].vals;
  idx_t const * const restrict slice_ptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fib_ptr   = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict slice_ids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fib_ids   = ct->pt[tile_id].fids[1];
  idx_t const * const restrict nnz_ids   = ct->pt[tile_id].fids[2];

  /* factor matrices at depths 0 (root) and 2 (leaf); output at MAX_NMODES */
  val_t const * const amat = mats[csf_depth_to_mode(ct, 0)]->vals;
  val_t const * const bmat = mats[csf_depth_to_mode(ct, 2)]->vals;
  val_t * const omat = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  int const tid = 0; //splatt_omp_get_thread_num();
  val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];

  /* slice range for this thread */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const s_begin = (partition == NULL) ? 0 : partition[tid];
  idx_t const s_end   = (partition == NULL) ? nslices : partition[tid+1];

  for(idx_t s=s_begin; s < s_end; ++s) {
    idx_t const sid = (slice_ids == NULL) ? s : slice_ids[s];
    /* root factor row for this slice */
    val_t const * const restrict rootrow = amat + (sid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=slice_ptr[s]; f < slice_ptr[s+1]; ++f) {
      /* seed accumF with the fiber's first nonzero */
      idx_t const nz0 = fib_ptr[f];
      val_t const * const restrict brow0 = bmat + (nnz_ids[nz0] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = tvals[nz0] * brow0[r];
      }
      /* fold in the remaining nonzeros of this fiber */
      for(idx_t nz=nz0+1; nz < fib_ptr[f+1]; ++nz) {
        val_t const * const restrict brow = bmat + (nnz_ids[nz] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += tvals[nz] * brow[r];
        }
      }
      /* write scaled inner products to this fiber's output row */
      val_t * const restrict orow = omat + (fib_ids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        orow[r] += rootrow[r] * accumF[r];
      }
    }
  }
}
/*
 * General n-mode MTTKRP where the output mode is an internal CSF level
 * (depth `outdepth`), lock-free. For each subtree rooted at outdepth the
 * downward Hadamard chain (buf[0..outdepth-1]) is combined with the upward
 * fold of the subtree below (via p_propagate_up) and accumulated into the
 * output row of the outdepth node.
 */
static void p_csf_mttkrp_intl_nolock(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;
  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_intl3_nolock(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; //splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }
  val_t * const ovals = mats[MAX_NMODES]->vals;
  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }
    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }
    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }
      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];
      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);
      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      /* out[noderow] += (down-chain) ⊙ (up-fold); clears buf[outdepth] */
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/*
 * General n-mode MTTKRP with the output mode at an internal CSF level,
 * "locked" variant. Same DFS structure as p_csf_mttkrp_intl_nolock.
 * NOTE(review): the mutex calls around the output update are commented out
 * (and tid is hard-wired to 0), so as written this is only safe
 * single-threaded — confirm before re-enabling parallel scheduling.
 */
static void p_csf_mttkrp_intl_locked(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const * const partition)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;
  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }
  /* specialized 3-mode kernel avoids the general DFS machinery */
  if(nmodes == 3) {
    p_csf_mttkrp_intl3_locked(ct, tile_id, mats, mode, thds, partition);
    return;
  }
  idx_t const * const * const restrict fp
      = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids
      = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;
  /* find out which level in the tree this is */
  idx_t const outdepth = csf_mode_to_depth(ct, mode);
  /* per-depth factor rows, scratch buffers, and the DFS index stack */
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];
  int const tid = 0; // splatt_omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[csf_depth_to_mode(ct, m)]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }
  val_t * const ovals = mats[MAX_NMODES]->vals;
  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  idx_t const start = (partition != NULL) ? partition[tid] : 0;
  idx_t const stop = (partition != NULL) ? partition[tid+1] : nslices;
  for(idx_t s=start; s < stop; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }
    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }
    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow
            = mvals[depth+1] + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }
      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];
      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);
      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      // mutex_set_lock(pool, noderow);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
      // mutex_unset_lock(pool, noderow);
      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/*
 * Public MTTKRP entry point over CSF tensors: clears the output factor
 * matrix (mats[MAX_NMODES]) and dispatches to the root/internal/leaf kernel
 * depending on where `mode` sits in the selected CSF representation.
 *
 * tensors : array of CSF representations; ws->mode_csf_map selects which one
 * mats    : factor matrices; mats[MAX_NMODES] receives the result
 * mode    : the mode being updated
 * thds    : per-thread scratch (this build runs a single thread, tid 0)
 * ws      : MTTKRP workspace (mode->CSF mapping, tiling/partition info)
 * opts    : currently unused; the verbosity reporting that consumed it is
 *           commented out below
 *
 * Fix: removed a leftover debug printf ("Max nmdoes: ...") that misspelled
 * "nmodes" and spammed stdout on every call of this public API.
 */
void mttkrp_csf(
  splatt_csf const * const tensors,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  splatt_mttkrp_ws * const ws,
  double const * const opts)
{
  (void) opts; /* unused while verbosity reporting is disabled */
  /* ensure we use as many threads as our partitioning supports */
  //splatt_omp_set_num_threads(ws->num_threads);
  if(pool == NULL) {
    pool = &mutex_pool1; //mutex_alloc();
  }
  /* clear output matrix */
  matrix_t * const M = mats[MAX_NMODES];
  M->I = tensors[0].dims[mode];
  memset(M->vals, 0, M->I * M->J * sizeof(val_t));
  idx_t const nmodes = tensors[0].nmodes;
  /* reset thread times */
  thd_reset(thds, 1) ; //splatt_omp_get_max_threads());
  /* choose which MTTKRP function to use based on the depth of `mode` in
   * the CSF representation chosen for it */
  idx_t const which_csf = ws->mode_csf_map[mode];
  idx_t const outdepth = csf_mode_to_depth(&(tensors[which_csf]), mode);
  if(outdepth == 0) {
    /* root */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_root_locked, p_csf_mttkrp_root_nolock,
        mats, mode, thds, ws);
  } else if(outdepth == nmodes - 1) {
    /* leaf */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_leaf_locked, p_csf_mttkrp_leaf_nolock,
        mats, mode, thds, ws);
  } else {
    /* internal */
    p_schedule_tiles(tensors, which_csf,
        p_csf_mttkrp_intl_locked, p_csf_mttkrp_intl_nolock,
        mats, mode, thds, ws);
  }
  /*
  // print thread times, if requested
  if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    printf("MTTKRP mode %"SPLATT_PF_IDX": ", mode+1);
    thd_time_stats(thds, 1 splatt_omp_get_max_threads());
    if(ws->is_privatized[mode]) {
      printf("  reduction-time: %0.3fs\n", ws->reduction_time);
    }
  }
  thd_reset(thds, splatt_omp_get_max_threads());
  */
}
/******************************************************************************
* DEPRECATED FUNCTIONS
*****************************************************************************/
/******************************************************************************
* SPLATT MTTKRP
*****************************************************************************/
/*
 * (Deprecated path) MTTKRP over the older ftensor_t representation.
 * Dispatches to the sync-tiled or coop-tiled variants when the tensor was
 * built with tiling; otherwise runs the untiled slice/fiber kernel below.
 * Output M = mats[MAX_NMODES] is zeroed here, then row s of M accumulates
 * sum over fibers of (A row fids[f]) ⊙ (fiber inner products with B rows).
 * The OpenMP pragmas are commented out; tid is hard-wired to 0.
 */
void mttkrp_splatt(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  if(ft->tiled == SPLATT_SYNCTILE) {
    mttkrp_splatt_sync_tiled(ft, mats, mode, thds, nthreads);
    return;
  }
  if(ft->tiled == SPLATT_COOPTILE) {
    mttkrp_splatt_coop_tiled(ft, mats, mode, thds, nthreads);
    return;
  }
  /* output and input factor matrices (dim_perm orders the non-out modes) */
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslices = ft->dims[mode];
  idx_t const rank = M->J;
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));
  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;
  /* fiber-tensor structure: slice ptrs, fiber ptrs/ids, nnz ids + values */
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;
  // #pragma omp parallel
  {
    int const tid = 0; //splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);
    // #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      val_t * const restrict mv = mvals + (s * rank);
      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }
        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }
        /* scale inner products by row of A and update to M */
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/*
 * (Deprecated path) MTTKRP over a sync-tiled ftensor_t: iterates slabs of
 * fibers (slabptr) rather than slices; the output row for each fiber comes
 * from sids[f] instead of the slice index. M = mats[MAX_NMODES] is zeroed
 * here and then accumulated into. OpenMP pragmas are commented out; tid is
 * hard-wired to 0.
 */
void mttkrp_splatt_sync_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  /* output and input factor matrices (dim_perm orders the non-out modes) */
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;
  val_t * const mvals = M->vals;
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));
  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;
  /* tiled fiber-tensor structure: slab ptrs, slice ids per fiber, fiber
   * ptrs/ids, nnz ids + values */
  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;
  // #pragma omp parallel
  {
    int const tid = 0; //splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    timer_start(&thds[tid].ttime);
    // #pragma omp for schedule(dynamic, 1) nowait
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slice */
      for(idx_t f=slabptr[s]; f < slabptr[s+1]; ++f) {
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
        for(idx_t r=0; r < rank; ++r) {
          accumF[r] = vfirst * bv[r];
        }
        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] += v * bv[r];
          }
        }
        /* scale inner products by row of A and update to M */
        val_t * const restrict mv = mvals + (sids[f] * rank);
        val_t const * const restrict av = avals + (fids[f] * rank);
        for(idx_t r=0; r < rank; ++r) {
          mv[r] += accumF[r] * av[r];
        }
      }
    }
    timer_stop(&thds[tid].ttime);
  } /* end parallel region */
}
/*
 * "Cooperative tiled" MTTKRP: workers cooperate within each slab by
 * accumulating into thread-local output rows (thds[t].scratch[1]),
 * which are reduced into the global output after each slab.  The OpenMP
 * pragmas are commented out, so this currently runs serially with the
 * thread id fixed to 0.
 *
 * NOTE(review): indexing via ft->dim_perm[1] / dim_perm[2] suggests a
 * 3-mode tensor is assumed -- confirm against callers.
 */
void mttkrp_splatt_coop_tiled(
  ftensor_t const * const ft,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  idx_t const nthreads)
{
  /* output matrix and the two non-target factor matrices */
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mats[ft->dim_perm[1]];
  matrix_t const * const B = mats[ft->dim_perm[2]];
  idx_t const nslabs = ft->nslabs;
  idx_t const rank = M->J;
  val_t * const mvals = M->vals;
  /* output is accumulated into, so zero it first */
  memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t));
  val_t const * const avals = A->vals;
  val_t const * const bvals = B->vals;
  idx_t const * const restrict slabptr = ft->slabptr;
  idx_t const * const restrict sptr = ft->sptr;
  idx_t const * const restrict sids = ft->sids;
  idx_t const * const restrict fptr = ft->fptr;
  idx_t const * const restrict fids = ft->fids;
  idx_t const * const restrict inds = ft->inds;
  val_t const * const restrict vals = ft->vals;
  // #pragma omp parallel
  {
    int const tid = 0; // splatt_omp_get_thread_num();
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    /* thread-local tile of output rows, reduced after each slab */
    val_t * const localm = (val_t *) thds[tid].scratch[1];
    timer_start(&thds[tid].ttime);
    /* foreach slab */
    for(idx_t s=0; s < nslabs; ++s) {
      /* foreach fiber in slab */
      // #pragma omp for schedule(dynamic, 8)
      for(idx_t sl=slabptr[s]; sl < slabptr[s+1]; ++sl) {
        idx_t const slice = sids[sl];
        for(idx_t f=sptr[sl]; f < sptr[sl+1]; ++f) {
          /* first entry of the fiber is used to initialize accumF */
          idx_t const jjfirst = fptr[f];
          val_t const vfirst = vals[jjfirst];
          val_t const * const restrict bv = bvals + (inds[jjfirst] * rank);
          for(idx_t r=0; r < rank; ++r) {
            accumF[r] = vfirst * bv[r];
          }
          /* foreach nnz in fiber */
          for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
            val_t const v = vals[jj];
            val_t const * const restrict bv = bvals + (inds[jj] * rank);
            for(idx_t r=0; r < rank; ++r) {
              accumF[r] += v * bv[r];
            }
          }
          /* scale inner products by row of A and update thread-local M */
          val_t * const restrict mv = localm + ((slice % TILE_SIZES[0]) * rank);
          val_t const * const restrict av = avals + (fids[f] * rank);
          for(idx_t r=0; r < rank; ++r) {
            mv[r] += accumF[r] * av[r];
          }
        }
      }
      /* reduce every thread's local rows for this slab into the global
         output, zeroing the local buffers for the next slab */
      idx_t const start = s * TILE_SIZES[0];
      idx_t const stop = SS_MIN((s+1) * TILE_SIZES[0], ft->dims[mode]);
      // #pragma omp for schedule(static)
      for(idx_t i=start; i < stop; ++i) {
        /* map i back to global slice id */
        idx_t const localrow = i % TILE_SIZES[0];
        for(idx_t t=0; t < nthreads; ++t) {
          val_t * const threadm = (val_t *) thds[t].scratch[1];
          for(idx_t r=0; r < rank; ++r) {
            mvals[r + (i*rank)] += threadm[r + (localrow*rank)];
            threadm[r + (localrow*rank)] = 0.;
          }
        }
      }
    } /* end foreach slab */
    timer_stop(&thds[tid].ttime);
  } /* end omp parallel */
}
/******************************************************************************
* GIGA MTTKRP
*****************************************************************************/
/*
 * GIGA-style MTTKRP: the tensor is stored as a CSR matrix (spmat) whose
 * column index jointly encodes the two non-target modes
 * (col = a * B->I + b).  The result is written column-by-column into
 * mats[MAX_NMODES]; scratch must hold at least nnz(spmat) values.
 */
void mttkrp_giga(
  spmatrix_t const * const spmat,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = (mode == 0) ? mats[1] : mats[0];
  matrix_t const * const B = (mode == 2) ? mats[1] : mats[2];

  idx_t const nrows = spmat->I;
  idx_t const nfactors = M->J;

  idx_t const * const restrict rowptr = spmat->rowptr;
  idx_t const * const restrict colind = spmat->colind;
  val_t const * const restrict vals = spmat->vals;

  // #pragma omp parallel
  {
    for(idx_t f=0; f < nfactors; ++f) {
      /* column-major views of factor column f */
      val_t * const restrict mcol = M->vals + (f * nrows);
      val_t const * const restrict acol = A->vals + (f * A->I);
      val_t const * const restrict bcol = B->vals + (f * B->I);

      /* Joined Hadamard products of X, C, and B */
      // #pragma omp for schedule(dynamic, 16)
      for(idx_t row=0; row < nrows; ++row) {
        for(idx_t nz=rowptr[row]; nz < rowptr[row+1]; ++nz) {
          /* decode the two non-target coordinates from the column id */
          idx_t const arow = colind[nz] / B->I;
          idx_t const brow = colind[nz] % B->I;
          scratch[nz] = vals[nz] * acol[arow] * bcol[brow];
        }
      }

      /* now accumulate rows into column of M1 */
      // #pragma omp for schedule(dynamic, 16)
      for(idx_t row=0; row < nrows; ++row) {
        val_t accum = 0;
        for(idx_t nz=rowptr[row]; nz < rowptr[row+1]; ++nz) {
          accum += scratch[nz];
        }
        mcol[row] = accum;
      }
    }
  }
}
/******************************************************************************
* TTBOX MTTKRP
*****************************************************************************/
/*
 * TTBOX-style MTTKRP over a coordinate-format tensor.  For each factor
 * column, the per-nonzero products val * A-row * B-row are formed in
 * scratch and then scattered into the mode-`mode` rows of
 * mats[MAX_NMODES] (zeroed on entry).  scratch must hold >= nnz values.
 */
void mttkrp_ttbox(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = (mode == 0) ? mats[1] : mats[0];
  matrix_t const * const B = (mode == 2) ? mats[1] : mats[2];

  idx_t const nrows = tt->dims[mode];
  idx_t const nfactors = M->J;
  memset(M->vals, 0, nrows * nfactors * sizeof(val_t));

  idx_t const nnz = tt->nnz;
  idx_t const * const restrict indM = tt->ind[mode];
  idx_t const * const restrict indA = (mode == 0) ? tt->ind[1] : tt->ind[0];
  idx_t const * const restrict indB = (mode == 2) ? tt->ind[1] : tt->ind[2];
  val_t const * const restrict vals = tt->vals;

  for(idx_t f=0; f < nfactors; ++f) {
    /* column-major views of factor column f */
    val_t * const restrict mcol = M->vals + (f * nrows);
    val_t const * const restrict acol = A->vals + (f * A->I);
    val_t const * const restrict bcol = B->vals + (f * B->I);

    /* stretch out columns of A and B */
    // #pragma omp parallel for
    for(idx_t n=0; n < nnz; ++n) {
      scratch[n] = vals[n] * acol[indA[n]] * bcol[indB[n]];
    }

    /* now accumulate into m1 */
    for(idx_t n=0; n < nnz; ++n) {
      mcol[indM[n]] += scratch[n];
    }
  }
}
/*
 * MTTKRP that streams directly over the nonzeros of a coordinate-format
 * tensor.  Works for any number of modes: for each nonzero, the rows of
 * every non-target factor matrix are Hadamard-multiplied into an
 * accumulator, which is then added to the output row for that nonzero.
 * Output goes to mats[MAX_NMODES], which is zeroed on entry.
 *
 * Fix: out_ind was computed but unused (the output-row address
 * recomputed tt->ind[mode][n]); it is now used for the row address and
 * would also be the lock index if the mutex calls are re-enabled.
 *
 * NOTE: with the mutex calls and omp pragmas commented out this routine
 * is only safe to run serially.
 */
void mttkrp_stream(
  sptensor_t const * const tt,
  matrix_t ** mats,
  idx_t const mode)
{
  /* lazily grab the statically-allocated lock pool */
  if(pool == NULL) {
    pool = &mutex_pool2; //mutex_alloc();
  }

  matrix_t * const M = mats[MAX_NMODES];
  idx_t const I = tt->dims[mode];
  idx_t const nfactors = M->J;

  val_t * const outmat = M->vals;
  memset(outmat, 0, I * nfactors * sizeof(*outmat));

  idx_t const nmodes = tt->nmodes;

  val_t * mvals[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[m]->vals;
  }

  val_t const * const restrict vals = tt->vals;

  // #pragma omp parallel
  {
    /* per-thread accumulator of length nfactors */
    val_t * restrict accum = malloc(nfactors * sizeof(*accum));

    /* stream through nnz */
    // #pragma omp for schedule(static)
    for(idx_t n=0; n < tt->nnz; ++n) {
      /* initialize with value */
      for(idx_t f=0; f < nfactors; ++f) {
        accum[f] = vals[n];
      }

      /* Hadamard with the row of each non-target factor */
      for(idx_t m=0; m < nmodes; ++m) {
        if(m == mode) {
          continue;
        }
        val_t const * const restrict inrow = mvals[m] + \
            (tt->ind[m][n] * nfactors);
        for(idx_t f=0; f < nfactors; ++f) {
          accum[f] *= inrow[f];
        }
      }

      /* write to output */
      idx_t const out_ind = tt->ind[mode][n];
      val_t * const restrict outrow = outmat + (out_ind * nfactors);
      // mutex_set_lock(pool, out_ind);
      for(idx_t f=0; f < nfactors; ++f) {
        outrow[f] += accum[f];
      }
      // mutex_unset_lock(pool, out_ind);
    }

    splatt_free(accum);
  } /* end omp parallel */
}
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/*
 * Public API: computes the MTTKRP for one mode of a CSF tensor.
 *
 * mode     : mode to compute (0-indexed)
 * ncolumns : rank (number of columns of each factor matrix)
 * tensors  : CSF tensor(s), allocated per SPLATT_OPTION_CSF_ALLOC
 * matrices : input factor matrices (row-major, dims[m] x ncolumns)
 * matout   : output buffer, dims[mode] x ncolumns
 * options  : SPLATT options array
 *
 * Returns SPLATT_SUCCESS.
 *
 * Fix: 'mats[m]->J = ncolumns,' ended with the comma operator instead
 * of a semicolon -- harmless by accident, but a typo; now terminated
 * properly.
 */
int splatt_mttkrp(
    splatt_idx_t const mode,
    splatt_idx_t const ncolumns,
    splatt_csf const * const tensors,
    splatt_val_t ** matrices,
    splatt_val_t * const matout,
    double const * const options)
{
  idx_t const nmodes = tensors->nmodes;

  /* fill matrix pointers */
  matrix_t * mats[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) malloc(sizeof(matrix_t));
    mats[m]->I = tensors->dims[m];
    mats[m]->J = ncolumns;
    mats[m]->rowmajor = 1;
    mats[m]->vals = matrices[m];
  }
  mats[MAX_NMODES] = (matrix_t *) malloc(sizeof(matrix_t));
  mats[MAX_NMODES]->I = tensors->dims[mode];
  mats[MAX_NMODES]->J = ncolumns;
  mats[MAX_NMODES]->rowmajor = 1;
  mats[MAX_NMODES]->vals = matout;

  /* Setup thread structures. + 64 bytes is to avoid false sharing. */
  idx_t const nthreads = (idx_t) options[SPLATT_OPTION_NTHREADS];
  // splatt_omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
    (nmodes * ncolumns * sizeof(val_t)) + 64,
    0,
    (nmodes * ncolumns * sizeof(val_t)) + 64);

  splatt_mttkrp_ws * ws = splatt_mttkrp_alloc_ws(tensors, ncolumns, options);

  /* do the MTTKRP */
  mttkrp_csf(tensors, mats, mode, thds, ws, options);

  splatt_mttkrp_free_ws(ws);

  /* cleanup */
  thd_free(thds, nthreads);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]);
  }
  free(mats[MAX_NMODES]);

  return SPLATT_SUCCESS;
}
/*
 * Allocates and fills an MTTKRP workspace: maps each mode to a CSF
 * tensor per SPLATT_OPTION_CSF_ALLOC, builds per-CSF tile/tree
 * partitions, and allocates per-thread privatization buffers sized for
 * the largest privatized mode.
 *
 * Fix: removed two unconditional debug printf()s that passed
 * idx_t/size_t arguments to "%d" (undefined behavior on LP64 targets;
 * one also lacked a trailing newline).  The verbosity-gated report at
 * the bottom already prints the buffer footprint.
 */
splatt_mttkrp_ws * splatt_mttkrp_alloc_ws(
    splatt_csf const * const tensors,
    splatt_idx_t const ncolumns,
    double const * const opts)
{
  splatt_mttkrp_ws * ws = malloc(sizeof(*ws));

  idx_t num_csf = 0;
  //#ifdef _OPENMP
  //  idx_t const num_threads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
  //#else
  idx_t const num_threads = 1;
  //#endif
  ws->num_threads = num_threads;

  /* map each MTTKRP mode to a CSF tensor */
  splatt_csf_type which_csf = (splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC];
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    switch(which_csf) {
    case SPLATT_CSF_ONEMODE:
      /* only one tensor, map is easy */
      ws->mode_csf_map[m] = 0;
      num_csf = 1;
      break;

    case SPLATT_CSF_TWOMODE:
      /* last mode is mapped to second tensor */
      ws->mode_csf_map[m] = 0;
      if(csf_mode_to_depth(&(tensors[0]), m) == tensors->nmodes-1) {
        ws->mode_csf_map[m] = 1;
      }
      num_csf = 2;
      break;

    case SPLATT_CSF_ALLMODE:
      /* each mode has its own tensor, map is easy */
      ws->mode_csf_map[m] = m;
      num_csf = tensors->nmodes;
      break;

    /* XXX */
    default:
      fprintf(stderr, "SPLATT: CSF type '%d' not recognized.\n", which_csf);
      abort();
      break;
    }
  }

  assert(num_csf > 0);
  ws->num_csf = num_csf;

  /* Now setup partition info for each CSF. */
  for(idx_t c=0; c < num_csf; ++c) {
    ws->tile_partition[c] = NULL;
    ws->tree_partition[c] = NULL;
  }
  for(idx_t c=0; c < num_csf; ++c) {
    splatt_csf const * const csf = &(tensors[c]);
    if(tensors[c].ntiles > 1) {
      ws->tile_partition[c] = csf_partition_tiles_1d(csf, num_threads);
    } else {
      ws->tree_partition[c] = csf_partition_1d(csf, 0, num_threads);
    }
  }

  /* allocate privatization buffer */
  idx_t largest_priv_dim = 0;
  ws->privatize_buffer =
      malloc(num_threads * sizeof(*(ws->privatize_buffer)));
  for(idx_t m=0; m < tensors->nmodes; ++m) {
    ws->is_privatized[m] = p_is_privatized(tensors, m, opts);
    if(ws->is_privatized[m]) {
      largest_priv_dim = SS_MAX(largest_priv_dim, tensors->dims[m]);
      if((int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
        printf("PRIVATIZING-MODE: %"SPLATT_PF_IDX"\n", m+1);
      }
    }
  }
  for(idx_t t=0; t < num_threads; ++t) {
    ws->privatize_buffer[t] = malloc(largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer)));
  }

  /* verbosity-gated report of the total privatization footprint */
  if(largest_priv_dim > 0 &&
      (int)opts[SPLATT_OPTION_VERBOSITY] == SPLATT_VERBOSITY_MAX) {
    size_t bytes = num_threads * largest_priv_dim * ncolumns *
        sizeof(**(ws->privatize_buffer));
    char * bstr = bytes_str(bytes);
    printf("PRIVATIZATION-BUF: %s\n", bstr);
    printf("\n");
    free(bstr);
  }

  return ws;
}
/*
 * Releases all memory held by an MTTKRP workspace, including the
 * workspace struct itself.
 */
void splatt_mttkrp_free_ws(
    splatt_mttkrp_ws * const ws)
{
  /* per-CSF partition info (entries may be NULL; splatt_free handles it) */
  for(idx_t i=0; i < ws->num_csf; ++i) {
    splatt_free(ws->tile_partition[i]);
    splatt_free(ws->tree_partition[i]);
  }

  /* per-thread privatization buffers, then the pointer array itself */
  for(idx_t i=0; i < ws->num_threads; ++i) {
    splatt_free(ws->privatize_buffer[i]);
  }
  splatt_free(ws->privatize_buffer);

  splatt_free(ws);
}
|
collision_matrix.c | /* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */
/* This file is part of phonopy. */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* * Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* * Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in */
/* the documentation and/or other materials provided with the */
/* distribution. */
/* * Neither the name of the phonopy project nor the names of its */
/* contributors may be used to endorse or promote products derived */
/* from this software without specific prior written permission. */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "collision_matrix.h"
/* Forward declarations of file-local helpers (definitions below). */
static void get_collision_matrix(double *collision_matrix,
                                 const double *fc3_normal_squared,
                                 const long num_band0,
                                 const long num_band,
                                 const double *frequencies,
                                 const long (*triplets)[3],
                                 const long *triplets_map,
                                 const long num_gp,
                                 const long *map_q,
                                 const long *rot_grid_points,
                                 const long num_ir_gp,
                                 const long num_rot,
                                 const double *rotations_cartesian,
                                 const double *g,
                                 const double temperature,
                                 const double unit_conversion_factor,
                                 const double cutoff_frequency);
static void
get_reducible_collision_matrix(double *collision_matrix,
                               const double *fc3_normal_squared,
                               const long num_band0,
                               const long num_band,
                               const double *frequencies,
                               const long (*triplets)[3],
                               const long *triplets_map,
                               const long num_gp,
                               const long *map_q,
                               const double *g,
                               const double temperature,
                               const double unit_conversion_factor,
                               const double cutoff_frequency);
static void get_inv_sinh(double *inv_sinh,
                         const long gp,
                         const double temperature,
                         const double *frequencies,
                         const long triplet[3],
                         const long *triplets_map,
                         const long *map_q,
                         const long num_band,
                         const double cutoff_frequency);
static long *create_gp2tp_map(const long *triplets_map,
                              const long num_gp);
/* Public entry point: builds the symmetry-reduced, rotation-expanded
 * collision matrix from the interaction strengths in fc3_normal_squared.
 * The g array holds three stacked grids of size
 * num_triplets*num_band0*num_band*num_band; the offset below selects the
 * third of them as the integration weight passed to the kernel. */
void col_get_collision_matrix(double *collision_matrix,
                              const Darray *fc3_normal_squared,
                              const double *frequencies,
                              const long (*triplets)[3],
                              const long *triplets_map,
                              const long *map_q,
                              const long *rot_grid_points,
                              const double *rotations_cartesian,
                              const double *g,
                              const long num_ir_gp,
                              const long num_gp,
                              const long num_rot,
                              const double temperature,
                              const double unit_conversion_factor,
                              const double cutoff_frequency)
{
  long num_triplets, num_band0, num_band;

  /* fc3_normal_squared has shape (num_triplets, num_band0, num_band, ...) */
  num_triplets = fc3_normal_squared->dims[0];
  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];

  get_collision_matrix(
    collision_matrix,
    fc3_normal_squared->data,
    num_band0,
    num_band,
    frequencies,
    triplets,
    triplets_map,
    num_gp,
    map_q,
    rot_grid_points,
    num_ir_gp,
    num_rot,
    rotations_cartesian,
    g + 2 * num_triplets * num_band0 * num_band * num_band,
    temperature,
    unit_conversion_factor,
    cutoff_frequency);
}
/* Public entry point for the full (reducible, no rotation expansion)
 * collision matrix.  As in col_get_collision_matrix, the third stacked
 * grid of g is passed to the kernel as the integration weight. */
void col_get_reducible_collision_matrix(double *collision_matrix,
                                        const Darray *fc3_normal_squared,
                                        const double *frequencies,
                                        const long (*triplets)[3],
                                        const long *triplets_map,
                                        const long *map_q,
                                        const double *g,
                                        const long num_gp,
                                        const double temperature,
                                        const double unit_conversion_factor,
                                        const double cutoff_frequency)
{
  long num_triplets, num_band, num_band0;

  /* fc3_normal_squared has shape (num_triplets, num_band0, num_band, ...) */
  num_triplets = fc3_normal_squared->dims[0];
  num_band0 = fc3_normal_squared->dims[1];
  num_band = fc3_normal_squared->dims[2];

  get_reducible_collision_matrix(
    collision_matrix,
    fc3_normal_squared->data,
    num_band0,
    num_band,
    frequencies,
    triplets,
    triplets_map,
    num_gp,
    map_q,
    g + 2 * num_triplets * num_band0 * num_band * num_band,
    temperature,
    unit_conversion_factor,
    cutoff_frequency);
}
/* Accumulates the rotation-expanded collision matrix.  For each
 * irreducible grid point i and each rotation j, a partial collision
 * value (interaction strength * g weight * 1/sinh occupation, summed
 * over the inner band index) is scattered through the 3x3 Cartesian
 * rotation into collision_matrix, whose indexing corresponds to shape
 * (num_band0, 3, num_ir_gp, num_band, 3). */
static void get_collision_matrix(double *collision_matrix,
                                 const double *fc3_normal_squared,
                                 const long num_band0,
                                 const long num_band,
                                 const double *frequencies,
                                 const long (*triplets)[3],
                                 const long *triplets_map,
                                 const long num_gp,
                                 const long *map_q,
                                 const long *rot_grid_points,
                                 const long num_ir_gp,
                                 const long num_rot,
                                 const double *rotations_cartesian,
                                 const double *g,
                                 const double temperature,
                                 const double unit_conversion_factor,
                                 const double cutoff_frequency)
{
  long i, j, k, l, m, n, ti, r_gp;
  long *gp2tp_map;
  double collision;
  double *inv_sinh;

  /* maps a triplet representative grid point to its triplet index */
  gp2tp_map = create_gp2tp_map(triplets_map, num_gp);

#pragma omp parallel for private(j, k, l, m, n, ti, r_gp, collision, inv_sinh)
  for (i = 0; i < num_ir_gp; i++) {
    /* allocated inside the loop so each OpenMP thread owns its buffer */
    inv_sinh = (double*)malloc(sizeof(double) * num_band);
    for (j = 0; j < num_rot; j++) {
      /* grid point obtained by applying rotation j to ir point i */
      r_gp = rot_grid_points[i * num_rot + j];
      ti = gp2tp_map[triplets_map[r_gp]];
      get_inv_sinh(inv_sinh,
                   r_gp,
                   temperature,
                   frequencies,
                   triplets[ti],
                   triplets_map,
                   map_q,
                   num_band,
                   cutoff_frequency);
      for (k = 0; k < num_band0; k++) {
        for (l = 0; l < num_band; l++) {
          collision = 0;
          /* sum over the third phonon band */
          for (m = 0; m < num_band; m++) {
            collision +=
              fc3_normal_squared[ti * num_band0 * num_band * num_band +
                                 k * num_band * num_band +
                                 l * num_band + m] *
              g[ti * num_band0 * num_band * num_band +
                k * num_band * num_band +
                l * num_band + m] *
              inv_sinh[m] * unit_conversion_factor;
          }
          /* scatter through the 3x3 Cartesian rotation */
          for (m = 0; m < 3; m++) {
            for (n = 0; n < 3; n++) {
              collision_matrix[k * 3 * num_ir_gp * num_band * 3 +
                               m * num_ir_gp * num_band * 3 +
                               i * num_band * 3 + l * 3 + n] +=
                collision * rotations_cartesian[j * 9 + m * 3 + n];
            }
          }
        }
      }
    }
    free(inv_sinh);
    inv_sinh = NULL;
  }

  free(gp2tp_map);
  gp2tp_map = NULL;
}
/* Accumulates the reducible (no rotation expansion) collision matrix
 * over all grid points.  collision_matrix indexing corresponds to shape
 * (num_band0, num_gp, num_band). */
static void
get_reducible_collision_matrix(double *collision_matrix,
                               const double *fc3_normal_squared,
                               const long num_band0,
                               const long num_band,
                               const double *frequencies,
                               const long (*triplets)[3],
                               const long *triplets_map,
                               const long num_gp,
                               const long *map_q,
                               const double *g,
                               const double temperature,
                               const double unit_conversion_factor,
                               const double cutoff_frequency)
{
  long i, j, k, l, ti;
  long *gp2tp_map;
  double collision;
  double *inv_sinh;

  /* maps a triplet representative grid point to its triplet index */
  gp2tp_map = create_gp2tp_map(triplets_map, num_gp);

#pragma omp parallel for private(j, k, l, ti, collision, inv_sinh)
  for (i = 0; i < num_gp; i++) {
    /* allocated inside the loop so each OpenMP thread owns its buffer */
    inv_sinh = (double*)malloc(sizeof(double) * num_band);
    ti = gp2tp_map[triplets_map[i]];
    get_inv_sinh(inv_sinh,
                 i,
                 temperature,
                 frequencies,
                 triplets[ti],
                 triplets_map,
                 map_q,
                 num_band,
                 cutoff_frequency);
    for (j = 0; j < num_band0; j++) {
      for (k = 0; k < num_band; k++) {
        collision = 0;
        /* sum over the third phonon band */
        for (l = 0; l < num_band; l++) {
          collision +=
            fc3_normal_squared[ti * num_band0 * num_band * num_band +
                               j * num_band * num_band +
                               k * num_band + l] *
            g[ti * num_band0 * num_band * num_band +
              j * num_band * num_band +
              k * num_band + l] *
            inv_sinh[l] * unit_conversion_factor;
        }
        collision_matrix[j * num_gp * num_band + i * num_band + k] += collision;
      }
    }
    free(inv_sinh);
    inv_sinh = NULL;
  }

  free(gp2tp_map);
  gp2tp_map = NULL;
}
/* Fills inv_sinh[0..num_band) with inverse-sinh occupation factors for
 * the second interacting q-point of the triplet that contains grid
 * point gp.  Bands whose frequency does not exceed cutoff_frequency
 * are set to 0. */
static void get_inv_sinh(double *inv_sinh,
                         const long gp,
                         const double temperature,
                         const double *frequencies,
                         const long triplet[3],
                         const long *triplets_map,
                         const long *map_q,
                         const long num_band,
                         const double cutoff_frequency)
{
  long band;
  long gp2;
  const double *freqs;

  /* This assumes the algorithm of get_ir_triplets_at_q_perm_q1q2, */
  /* where defined triplets_map[gp] == triplets_map[map_q[gp]]. */
  /* If triplets_map[map_q[gp]] != map_q[gp], q1 and q2 are permuted. */
  gp2 = (triplets_map[gp] == map_q[gp]) ? triplet[2] : triplet[1];

  freqs = frequencies + gp2 * num_band;
  for (band = 0; band < num_band; band++) {
    inv_sinh[band] = (freqs[band] > cutoff_frequency)
      ? phonoc_inv_sinh_occupation(freqs[band], temperature)
      : 0;
  }
}
/* Symmetrically independent triplets are indexed. */
/* Inverse definition of ir_grid_points in get_BZ_triplets_at_q */
/* in triplet_grid.c. */
/* Returns a freshly malloc'd array of length num_gp in which each grid
 * point that is its own representative (triplets_map[i] == i) receives
 * a consecutive triplet index, and every other entry is -1 (never read
 * by the callers).  Caller frees. */
static long *create_gp2tp_map(const long *triplets_map,
                              const long num_gp)
{
  long gp;
  long next_tp = 0;
  long *map;

  map = (long*)malloc(sizeof(long) * num_gp);
  for (gp = 0; gp < num_gp; gp++) {
    if (triplets_map[gp] == gp) {
      map[gp] = next_tp;
      next_tp++;
    } else { /* This should not be used. */
      map[gp] = -1;
    }
  }
  return map;
}
|
par_csr_matvec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO par_csr_matvec_device.c, include cuda there
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatvec
*--------------------------------------------------------------------------*/
/*
 * Computes y = alpha*A*x + beta*b for a distributed ParCSR matrix,
 * overlapping the halo exchange of off-processor x entries with the
 * local (diag) matvec:
 *   1. pack the locally-owned x entries needed by neighbors
 *   2. start nonblocking communication into x_tmp
 *   3. y_local = alpha*diag*x_local + beta*b_local  (overlapped)
 *   4. finish communication
 *   5. y_local += alpha*offd*x_tmp
 * Returns 0 on success; 11/12/13 are informational size-mismatch flags
 * (see the comment below) and do not abort the computation.
 */
HYPRE_Int
hypre_ParCSRMatrixMatvecOutOfPlace( HYPRE_Complex alpha,
                                    hypre_ParCSRMatrix *A,
                                    hypre_ParVector *x,
                                    HYPRE_Complex beta,
                                    hypre_ParVector *b,
                                    hypre_ParVector *y )
{
   hypre_ParCSRCommHandle **comm_handle;
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   hypre_Vector *x_tmp;
   HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
   HYPRE_BigInt b_size = hypre_ParVectorGlobalSize(b);
   HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
   HYPRE_Int num_vectors = hypre_VectorNumVectors(x_local);
   HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
   HYPRE_Int ierr = 0;
   HYPRE_Int num_sends, jv;
   HYPRE_Int vecstride = hypre_VectorVectorStride( x_local );
   HYPRE_Int idxstride = hypre_VectorIndexStride( x_local );
   HYPRE_Complex *x_tmp_data, **x_buf_data;
   HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
#if defined(HYPRE_USING_GPU)
   /* defer device-stream synchronization until the end of this routine */
   HYPRE_Int sync_stream;
   hypre_GetSyncCudaCompute(&sync_stream);
   hypre_SetSyncCudaCompute(0);
#endif
   /*---------------------------------------------------------------------
    * Check for size compatibility.  ParMatvec returns ierr = 11 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 12 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 13 if both are true.
    *
    * Because temporary vectors are often used in ParMatvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert( idxstride>0 );
   if (num_cols != x_size)
   {
      ierr = 11;
   }
   if (num_rows != y_size || num_rows != b_size)
   {
      ierr = 12;
   }
   if (num_cols != x_size && (num_rows != y_size || num_rows != b_size))
   {
      ierr = 13;
   }
   hypre_assert( hypre_VectorNumVectors(b_local) == num_vectors );
   hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
   /* x_tmp receives the off-processor x entries needed by offd */
   if ( num_vectors == 1 )
   {
      x_tmp = hypre_SeqVectorCreate( num_cols_offd );
   }
   else
   {
      hypre_assert( num_vectors > 1 );
      x_tmp = hypre_SeqMultiVectorCreate( num_cols_offd, num_vectors );
   }
   /*---------------------------------------------------------------------
    * If there exists no CommPkg for A, a CommPkg is generated using
    * equally load balanced partitionings
    *--------------------------------------------------------------------*/
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
   hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
   HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
   use_persistent_comm = num_vectors == 1;
   // JSP TODO: we can use persistent communication for multi-vectors,
   // but then we need different communication handles for different
   // num_vectors.
   hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(1, comm_pkg);
#endif
   }
   else
   {
      /* one nonblocking handle per vector of the multivector */
      comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
   }
   /* x_tmp */
#if defined(HYPRE_USING_GPU)
   /* for GPU and single vector, alloc persistent memory for x_tmp (in comm_pkg) and reuse */
   if (num_vectors == 1)
   {
      if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
      {
#if 1
         hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
#else
         hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE);
#endif
      }
      hypre_VectorData(x_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
   }
#else
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      /* receive directly into the persistent handle's buffer */
      hypre_VectorData(x_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
      hypre_SeqVectorSetDataOwner(x_tmp, 0);
#endif
   }
#endif
   hypre_SeqVectorInitialize_v2(x_tmp, HYPRE_MEMORY_DEVICE);
   x_tmp_data = hypre_VectorData(x_tmp);
   /* x_buff_data */
   x_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
   for (jv = 0; jv < num_vectors; ++jv)
   {
#if defined(HYPRE_USING_GPU)
      /* reuse the persistent device send buffer for the first vector */
      if (jv == 0)
      {
         if (!hypre_ParCSRCommPkgBufData(comm_pkg))
         {
#if 1
            hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
                                                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                HYPRE_MEMORY_DEVICE);
#else
            hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
                                                                 hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                                                 hypre_MEMORY_DEVICE);
#endif
         }
         x_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
         continue;
      }
#endif
      if (use_persistent_comm)
      {
#ifdef HYPRE_USING_PERSISTENT_COMM
         x_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
         continue;
#endif
      }
      x_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
                                    hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                    HYPRE_MEMORY_DEVICE);
   }
   /* The assert is because the following loop only works for 'column'
      storage of a multivector. This needs to be fixed to work more generally,
      at least for 'row' storage. This in turn, means either change CommPkg so
      num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
      a stride in the logic of CommHandleCreate (stride either from a new arg or
      a new variable inside CommPkg). Or put the num_vector iteration inside
      CommHandleCreate (perhaps a new multivector variant of it).
   */
   hypre_assert( idxstride == 1 );
   //hypre_SeqVectorPrefetch(x_local, HYPRE_MEMORY_DEVICE);
   /* send_map_elmts on device */
   hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
   for (jv = 0; jv < num_vectors; ++jv)
   {
      HYPRE_Complex *send_data = (HYPRE_Complex *) x_buf_data[jv];
      HYPRE_Complex *locl_data = x_local_data + jv * vecstride;
      /* if on device, no need to Sync: send_data is on device memory */
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
      /* pack send data on device */
      HYPRE_THRUST_CALL( gather,
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
                         hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg) +
                         hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                         locl_data,
                         send_data );
#elif defined(HYPRE_USING_DEVICE_OPENMP)
      /* pack send data on device */
      HYPRE_Int i;
      HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
      HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
      HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(send_data, locl_data, device_send_map_elmts)
      for (i = start; i < end; i++)
      {
         send_data[i] = locl_data[device_send_map_elmts[i]];
      }
#else
      HYPRE_Int i;
      /* pack send data on host */
#if defined(HYPRE_USING_OPENMP)
      #pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
      for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
           i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
           i ++)
      {
         send_data[i] = locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)];
      }
#endif
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
   /* nonblocking communication starts */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_buf_data[0]);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg, HYPRE_MEMORY_DEVICE, x_buf_data[jv],
                                                            HYPRE_MEMORY_DEVICE, &x_tmp_data[jv*num_cols_offd] );
      }
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
   /* overlapped local computation */
   hypre_CSRMatrixMatvecOutOfPlace( alpha, diag, x_local, beta, b_local, y_local, 0 );
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
   /* nonblocking communication ends */
   if (use_persistent_comm)
   {
#ifdef HYPRE_USING_PERSISTENT_COMM
      hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, x_tmp_data);
#endif
   }
   else
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
         hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
         comm_handle[jv] = NULL;
      }
      hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
   /* computation offd part */
   if (num_cols_offd)
   {
      hypre_CSRMatrixMatvec( alpha, offd, x_tmp, 1.0, y_local );
   }
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
   hypre_SeqVectorDestroy(x_tmp); x_tmp = NULL;
   if (!use_persistent_comm)
   {
      for ( jv = 0; jv < num_vectors; ++jv )
      {
#if defined(HYPRE_USING_GPU)
         /* jv == 0 aliases the comm_pkg-owned persistent buffer */
         if (jv == 0)
         {
            continue;
         }
#endif
         hypre_TFree(x_buf_data[jv], HYPRE_MEMORY_DEVICE);
      }
      hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
   }
#if defined(HYPRE_USING_GPU)
   hypre_SetSyncCudaCompute(sync_stream);
   hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
   return ierr;
}
/* Computes y = alpha*A*x + beta*y by delegating to the out-of-place
 * kernel with b aliased to y. */
HYPRE_Int
hypre_ParCSRMatrixMatvec( HYPRE_Complex alpha,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector *x,
                          HYPRE_Complex beta,
                          hypre_ParVector *y )
{
   return hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, x, beta, y, y);
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvecT
 *
 * Performs y <- alpha * A^T * x + beta * y
 *
 * Communication pattern is the reverse of the forward matvec: the local
 * off-diagonal transpose product is computed into y_tmp, its entries are
 * sent to the processes that own the corresponding columns, and received
 * contributions are scatter-added into the local part of y using the
 * comm-pkg send map. The diagonal-block product overlaps communication.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvecT( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y )
{
hypre_ParCSRCommHandle **comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
/* diagT/offdT: optional pre-computed explicit transposes (may be NULL) */
hypre_CSRMatrix *diagT = hypre_ParCSRMatrixDiagT(A);
hypre_CSRMatrix *offdT = hypre_ParCSRMatrixOffdT(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
hypre_Vector *y_tmp;
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_vectors = hypre_VectorNumVectors(y_local);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, jv;
HYPRE_Int vecstride = hypre_VectorVectorStride(y_local);
HYPRE_Int idxstride = hypre_VectorIndexStride(y_local);
HYPRE_Complex *y_tmp_data, **y_buf_data;
HYPRE_Complex *y_local_data = hypre_VectorData(y_local);
#if defined(HYPRE_USING_GPU)
/* Disable per-call stream sync while the kernels below are queued;
 * restored (and synced once) at the end of this routine. */
HYPRE_Int sync_stream;
hypre_GetSyncCudaCompute(&sync_stream);
hypre_SetSyncCudaCompute(0);
#endif
/*---------------------------------------------------------------------
 * Check for size compatibility. MatvecT returns ierr = 1 if
 * length of X doesn't equal the number of rows of A,
 * ierr = 2 if the length of Y doesn't equal the number of
 * columns of A, and ierr = 3 if both are true.
 *
 * Because temporary vectors are often used in MatvecT, none of
 * these conditions terminates processing, and the ierr flag
 * is informational only.
 *--------------------------------------------------------------------*/
if (num_rows != x_size)
{
ierr = 1;
}
if (num_cols != y_size)
{
ierr = 2;
}
if (num_rows != x_size && num_cols != y_size)
{
ierr = 3;
}
hypre_assert( hypre_VectorNumVectors(x_local) == num_vectors );
hypre_assert( hypre_VectorNumVectors(y_local) == num_vectors );
/* y_tmp holds the off-diagonal transpose product destined for neighbors */
if ( num_vectors == 1 )
{
y_tmp = hypre_SeqVectorCreate(num_cols_offd);
}
else
{
hypre_assert( num_vectors > 1 );
y_tmp = hypre_SeqMultiVectorCreate(num_cols_offd, num_vectors);
}
/*---------------------------------------------------------------------
 * If there exists no CommPkg for A, a CommPkg is generated using
 * equally load balanced partitionings
 *--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
hypre_assert( num_cols_offd == hypre_ParCSRCommPkgRecvVecStart(comm_pkg, hypre_ParCSRCommPkgNumRecvs(comm_pkg)) );
hypre_assert( hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0) == 0 );
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
HYPRE_Int use_persistent_comm = 0;
#ifdef HYPRE_USING_PERSISTENT_COMM
use_persistent_comm = num_vectors == 1;
// JSP TODO: we can use persistent communication for multi-vectors,
// but then we need different communication handles for different
// num_vectors.
hypre_ParCSRPersistentCommHandle *persistent_comm_handle;
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
/* job 2 == transpose-matvec communication pattern */
persistent_comm_handle = hypre_ParCSRCommPkgGetPersistentCommHandle(2, comm_pkg);
#endif
}
else
{
/* one non-blocking handle per vector of the multivector */
comm_handle = hypre_CTAlloc(hypre_ParCSRCommHandle*, num_vectors, HYPRE_MEMORY_HOST);
}
/* y_tmp */
#if defined(HYPRE_USING_GPU)
/* for GPU and single vector, alloc persistent memory for y_tmp (in comm_pkg) and reuse */
if (num_vectors == 1)
{
if (!hypre_ParCSRCommPkgTmpData(comm_pkg))
{
#if 1
hypre_ParCSRCommPkgTmpData(comm_pkg) = hypre_TAlloc(HYPRE_Complex, num_cols_offd, HYPRE_MEMORY_DEVICE);
#else
hypre_ParCSRCommPkgTmpData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex, num_cols_offd, hypre_MEMORY_DEVICE);
#endif
}
/* y_tmp borrows the comm_pkg buffer; it must not free it on destroy */
hypre_VectorData(y_tmp) = hypre_ParCSRCommPkgTmpData(comm_pkg);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
}
#else
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
/* reversed direction vs forward matvec: y_tmp is the SEND side here */
hypre_VectorData(y_tmp) = (HYPRE_Complex *) hypre_ParCSRCommHandleSendDataBuffer(persistent_comm_handle);
hypre_SeqVectorSetDataOwner(y_tmp, 0);
#endif
}
#endif
hypre_SeqVectorInitialize_v2(y_tmp, HYPRE_MEMORY_DEVICE);
y_tmp_data = hypre_VectorData(y_tmp);
/* y_buf_data: per-vector receive buffers for the incoming contributions */
y_buf_data = hypre_CTAlloc(HYPRE_Complex*, num_vectors, HYPRE_MEMORY_HOST);
for (jv = 0; jv < num_vectors; ++jv)
{
#if defined(HYPRE_USING_GPU)
if (jv == 0)
{
/* reuse the persistent device buffer cached in comm_pkg for vector 0 */
if (!hypre_ParCSRCommPkgBufData(comm_pkg))
{
#if 1
hypre_ParCSRCommPkgBufData(comm_pkg) = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
#else
hypre_ParCSRCommPkgBufData(comm_pkg) = _hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_MEMORY_DEVICE);
#endif
}
y_buf_data[0] = hypre_ParCSRCommPkgBufData(comm_pkg);
continue;
}
#endif
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
/* persistent comm implies num_vectors == 1, so only jv == 0 occurs */
y_buf_data[0] = (HYPRE_Complex *) hypre_ParCSRCommHandleRecvDataBuffer(persistent_comm_handle);
continue;
#endif
}
y_buf_data[jv] = hypre_TAlloc(HYPRE_Complex,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
/* local off-diagonal product: y_tmp = alpha * offd^T * x_local */
if (num_cols_offd)
{
if (offdT)
{
// offdT is optional. Used only if it's present
hypre_CSRMatrixMatvec(alpha, offdT, x_local, 0.0, y_tmp);
}
else
{
hypre_CSRMatrixMatvecT(alpha, offd, x_local, 0.0, y_tmp);
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* start non-blocking exchange of y_tmp -> y_buf_data */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleStart(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_tmp_data);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
/* this is where we assume multivectors are 'column' storage */
comm_handle[jv] = hypre_ParCSRCommHandleCreate_v2( 2, comm_pkg, HYPRE_MEMORY_DEVICE, &y_tmp_data[jv*num_cols_offd],
HYPRE_MEMORY_DEVICE, y_buf_data[jv] );
}
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
#endif
/* overlapped local computation */
if (diagT)
{
// diagT is optional. Used only if it's present.
hypre_CSRMatrixMatvec(alpha, diagT, x_local, beta, y_local);
}
else
{
hypre_CSRMatrixMatvecT(alpha, diag, x_local, beta, y_local);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] -= hypre_MPI_Wtime();
#endif
/* nonblocking communication ends */
if (use_persistent_comm)
{
#ifdef HYPRE_USING_PERSISTENT_COMM
hypre_ParCSRPersistentCommHandleWait(persistent_comm_handle, HYPRE_MEMORY_DEVICE, y_buf_data[0]);
#endif
}
else
{
for ( jv = 0; jv < num_vectors; ++jv )
{
hypre_ParCSRCommHandleDestroy(comm_handle[jv]);
comm_handle[jv] = NULL;
}
hypre_TFree(comm_handle, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_HALO_EXCHANGE] += hypre_MPI_Wtime();
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] -= hypre_MPI_Wtime();
#endif
/* The assert is because the following loop only works for 'column'
storage of a multivector. This needs to be fixed to work more generally,
at least for 'row' storage. This in turn, means either change CommPkg so
num_sends is no.zones*no.vectors (not no.zones) or, less dangerously, put
a stride in the logic of CommHandleCreate (stride either from a new arg or
a new variable inside CommPkg). Or put the num_vector iteration inside
CommHandleCreate (perhaps a new multivector variant of it).
*/
hypre_assert( idxstride == 1 );
/* send_map_elmts on device */
hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
/* scatter-add received contributions into y via the send map
 * (the send map is the receive map for the transpose operation) */
for (jv = 0; jv < num_vectors; ++jv)
{
HYPRE_Complex *recv_data = (HYPRE_Complex *) y_buf_data[jv];
HYPRE_Complex *locl_data = y_local_data + jv * vecstride;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
/* unpack recv data on device */
if (!hypre_ParCSRCommPkgWorkSpace(comm_pkg))
{
hypre_ParCSRCommPkgWorkSpace(comm_pkg) =
hypre_TAlloc( char,
(2*sizeof(HYPRE_Int)+sizeof(HYPRE_Real)) * hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_DEVICE );
}
hypreDevice_GenScatterAdd(locl_data,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg),
recv_data,
hypre_ParCSRCommPkgWorkSpace(comm_pkg));
#elif defined(HYPRE_USING_DEVICE_OPENMP)
HYPRE_Int i, j;
/* unpack recv data on device */
for (i = 0; i < num_sends; i++)
{
HYPRE_Int *device_send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
HYPRE_Int start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1);
#pragma omp target teams distribute parallel for private(j) is_device_ptr(recv_data, locl_data, device_send_map_elmts)
for (j = start; j < end; j++)
{
locl_data[device_send_map_elmts[j]] += recv_data[j];
}
}
#else
HYPRE_Int i;
/* unpack recv data on host, TODO OMP? */
for (i = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0);
i < hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
i ++)
{
locl_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,i)] += recv_data[i];
}
#endif
}
hypre_SeqVectorDestroy(y_tmp); y_tmp = NULL;
/* free per-vector buffers; jv == 0 on GPU is owned by comm_pkg */
if (!use_persistent_comm)
{
for ( jv = 0; jv < num_vectors; ++jv )
{
#if defined(HYPRE_USING_GPU)
if (jv == 0)
{
continue;
}
#endif
hypre_TFree(y_buf_data[jv], HYPRE_MEMORY_DEVICE);
}
hypre_TFree(y_buf_data, HYPRE_MEMORY_HOST);
}
#if defined(HYPRE_USING_GPU)
hypre_SetSyncCudaCompute(sync_stream);
hypre_SyncCudaComputeStream(hypre_handle());
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_PACK_UNPACK] += hypre_MPI_Wtime();
#endif
return ierr;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixMatvec_FF
 *
 * Performs y <- alpha * A * x + beta * y restricted to rows/columns whose
 * CF_marker equals fpt ("fine-to-fine" matvec). The CF marker of the
 * off-processor columns is exchanged so the offd part can be filtered too.
 *
 * Returns ierr = 11 if the length of x doesn't equal the number of columns
 * of A, 12 if the length of y doesn't equal the number of rows, 13 if both.
 * As with the other matvec variants, ierr is informational only.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixMatvec_FF( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
hypre_ParVector *x,
HYPRE_Complex beta,
hypre_ParVector *y,
HYPRE_Int *CF_marker,
HYPRE_Int fpt )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
HYPRE_BigInt num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
/* BUG FIX: x_tmp must start NULL -- it is created only when
 * num_cols_offd != 0 but was unconditionally destroyed below,
 * dereferencing an uninitialized pointer for matrices with an
 * empty offd part. */
hypre_Vector *x_tmp = NULL;
HYPRE_BigInt x_size = hypre_ParVectorGlobalSize(x);
HYPRE_BigInt y_size = hypre_ParVectorGlobalSize(y);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(offd);
HYPRE_Int ierr = 0;
HYPRE_Int num_sends, i, j, index, start, num_procs;
HYPRE_Int *int_buf_data = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Complex *x_tmp_data = NULL;
HYPRE_Complex *x_buf_data = NULL;
HYPRE_Complex *x_local_data = hypre_VectorData(x_local);
/*---------------------------------------------------------------------
 * Check for size compatibility (informational only, see header).
 *--------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm, &num_procs);
if (num_cols != x_size)
{
ierr = 11;
}
if (num_rows != y_size)
{
ierr = 12;
}
if (num_cols != x_size && num_rows != y_size)
{
ierr = 13;
}
if (num_procs > 1)
{
if (num_cols_offd)
{
x_tmp = hypre_SeqVectorCreate( num_cols_offd );
hypre_SeqVectorInitialize(x_tmp);
x_tmp_data = hypre_VectorData(x_tmp);
}
/*---------------------------------------------------------------------
 * If there exists no CommPkg for A, a CommPkg is generated using
 * equally load balanced partitionings
 *--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_sends)
{
x_buf_data = hypre_CTAlloc(HYPRE_Complex, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
}
/* pack the x entries needed by neighbor processes */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
x_buf_data[index++]
= x_local_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
/* job 1 == forward matvec halo exchange */
comm_handle =
hypre_ParCSRCommHandleCreate ( 1, comm_pkg, x_buf_data, x_tmp_data );
}
/* local diagonal part, overlapped with the x halo exchange */
hypre_CSRMatrixMatvec_FF( alpha, diag, x_local, beta, y_local, CF_marker,
CF_marker, fpt);
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/* exchange CF_marker so the offd columns can be filtered by fpt */
if (num_sends)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart
(comm_pkg, num_sends), HYPRE_MEMORY_HOST);
}
if (num_cols_offd) CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
{
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
}
/* job 11 == integer halo exchange */
comm_handle =
hypre_ParCSRCommHandleCreate(11,comm_pkg,int_buf_data,CF_marker_offd );
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
if (num_cols_offd) hypre_CSRMatrixMatvec_FF( alpha, offd, x_tmp, 1.0, y_local,
CF_marker, CF_marker_offd, fpt);
if (x_tmp)
{
hypre_SeqVectorDestroy(x_tmp);
x_tmp = NULL;
}
hypre_TFree(x_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
}
return ierr;
}
|
zkboo_prove.c | /*
Name: zkboo_prove.c
Author: Tan Teik Guan
Description: Prove function for ZKBoo for baseline comparison. Modified from MPC_SHA256.c
*/
/*
============================================================================
Name : MPC_SHA256.c
Author : Sobuno
Version : 0.1
Description : MPC SHA256 for one block only
============================================================================
*/
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "zkboo_shared.h"
#include "omp.h"
#define CH(e,f,g) ((e & f) ^ ((~e) & g))
int totalRandom = 0;
int totalSha = 0;
int totalSS = 0;
int totalHash = 0;
int NUM_ROUNDS = 100;
uint32_t rand32() {
uint32_t x;
x = rand() & 0xff;
x |= (rand() & 0xff) << 8;
x |= (rand() & 0xff) << 16;
x |= (rand() & 0xff) << 24;
return x;
}
/* Print n in binary (most-significant set bit first) to stdout.
 * Prints nothing for n == 0, matching the original recursion base case. */
void printbits(uint32_t n) {
	if (n == 0) {
		return;
	}
	printbits(n >> 1);
	putchar((n & 1) ? '1' : '0');
}
/* Share-wise XOR: z[i] = x[i] ^ y[i] for each of the three MPC parties.
 * XOR is linear, so no randomness or view recording is needed. */
void mpc_XOR(uint32_t x[3], uint32_t y[3], uint32_t z[3]) {
	for (int party = 0; party < 3; party++) {
		z[party] = x[party] ^ y[party];
	}
}
/* Share-wise AND in the (2,3)-decomposition: each party p combines its own
 * shares with party (p+1)'s, masked by fresh correlated randomness r.
 * The result is recorded in every party's view (needed by the verifier).
 * A temporary array is used because z may alias x or y at call sites. */
void mpc_AND(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
	uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount) };
	*randCount += 4;
	uint32_t t[3];
	for (int p = 0; p < 3; p++) {
		const int q = (p + 1) % 3;
		t[p] = (x[p] & y[q]) ^ (x[q] & y[p]) ^ (x[p] & y[p]) ^ r[p] ^ r[q];
	}
	for (int p = 0; p < 3; p++) {
		z[p] = t[p];
		views[p].y[*countY] = t[p];
	}
	(*countY)++;
}
/* Share-wise bitwise complement: z[i] = ~x[i] for each party. */
void mpc_NEGATE(uint32_t x[3], uint32_t z[3]) {
	for (int party = 0; party < 3; party++) {
		z[party] = ~x[party];
	}
}
/* Share-wise 32-bit addition z = x + y via an MPC ripple-carry adder.
 * Carries are computed bit by bit with mpc_AND-style masking; the loop
 * runs only to bit 30 (the original's behavior: the carry out of bit 31
 * is discarded, as in ordinary mod-2^32 addition). The carry words are
 * committed to every party's view. z may alias x or y at call sites;
 * this is safe because z[p] depends only on index p. */
void mpc_ADD(uint32_t x[3], uint32_t y[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
	uint32_t c[3] = { 0 };
	uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount) };
	*randCount += 4;
	for (int i = 0; i < 31; i++) {
		uint8_t a[3], b[3];
		for (int p = 0; p < 3; p++) {
			a[p] = GETBIT(x[p] ^ c[p], i);
			b[p] = GETBIT(y[p] ^ c[p], i);
		}
		for (int p = 0; p < 3; p++) {
			const int q = (p + 1) % 3;
			uint8_t t = (a[p] & b[q]) ^ (a[q] & b[p]) ^ GETBIT(r[q], i);
			SETBIT(c[p], i + 1, t ^ (a[p] & b[p]) ^ GETBIT(c[p], i) ^ GETBIT(r[p], i));
		}
	}
	for (int p = 0; p < 3; p++) {
		z[p] = x[p] ^ y[p] ^ c[p];
		views[p].y[*countY] = c[p];
	}
	(*countY)++;
}
/* Share-wise addition of a public 32-bit constant y: z = x + y.
 * Same ripple-carry construction as mpc_ADD, except every party uses
 * the same (public) addend bits. Carry words go into the views. */
void mpc_ADDK(uint32_t x[3], uint32_t y, uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
	uint32_t c[3] = { 0 };
	uint32_t r[3] = { getRandom32(randomness[0], *randCount), getRandom32(randomness[1], *randCount), getRandom32(randomness[2], *randCount) };
	*randCount += 4;
	for (int i = 0; i < 31; i++) {
		uint8_t a[3], b[3];
		for (int p = 0; p < 3; p++) {
			a[p] = GETBIT(x[p] ^ c[p], i);
			b[p] = GETBIT(y ^ c[p], i);
		}
		for (int p = 0; p < 3; p++) {
			const int q = (p + 1) % 3;
			uint8_t t = (a[p] & b[q]) ^ (a[q] & b[p]) ^ GETBIT(r[q], i);
			SETBIT(c[p], i + 1, t ^ (a[p] & b[p]) ^ GETBIT(c[p], i) ^ GETBIT(r[p], i));
		}
	}
	for (int p = 0; p < 3; p++) {
		z[p] = x[p] ^ y ^ c[p];
		views[p].y[*countY] = c[p];
	}
	(*countY)++;
}
/* Plain (non-MPC) SHA-256 restricted to a single 512-bit block.
 * input: message bytes; numBits: message length in bits (max 447 so the
 * 0x80 pad byte and 64-bit length field fit in one block).
 * result: 32-byte digest, big-endian. Returns 0 on success, -1 on error. */
int sha256(unsigned char* result, unsigned char* input, int numBits) {
	uint32_t hA[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
			0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19 };
	if (numBits > 447) {
		printf("Input too long, aborting!");
		return -1;
	}
	int chars = numBits >> 3;
	unsigned char* chunk = calloc(64, 1); //512 bits
	if (!chunk) {
		/* robustness: calloc result was previously used unchecked */
		return -1;
	}
	memcpy(chunk, input, chars);
	chunk[chars] = 0x80;
	//Last 8 chars used for storing length of input without padding, in big-endian.
	//Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
	chunk[62] = numBits >> 8;
	chunk[63] = numBits;
	/* message schedule: first 16 words straight from the block */
	uint32_t w[64];
	int i;
	for (i = 0; i < 16; i++) {
		w[i] = (chunk[i * 4] << 24) | (chunk[i * 4 + 1] << 16)
				| (chunk[i * 4 + 2] << 8) | chunk[i * 4 + 3];
	}
	free(chunk); /* BUG FIX: chunk was allocated but never freed (leak) */
	uint32_t s0, s1;
	for (i = 16; i < 64; i++) {
		s0 = RIGHTROTATE(w[i - 15], 7) ^ RIGHTROTATE(w[i - 15], 18)
				^ (w[i - 15] >> 3);
		s1 = RIGHTROTATE(w[i - 2], 17) ^ RIGHTROTATE(w[i - 2], 19)
				^ (w[i - 2] >> 10);
		w[i] = w[i - 16] + s0 + w[i - 7] + s1;
	}
	/* compression loop over 64 rounds */
	uint32_t a, b, c, d, e, f, g, h, temp1, temp2, maj;
	a = hA[0];
	b = hA[1];
	c = hA[2];
	d = hA[3];
	e = hA[4];
	f = hA[5];
	g = hA[6];
	h = hA[7];
	for (i = 0; i < 64; i++) {
		s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e, 11) ^ RIGHTROTATE(e, 25);
		temp1 = h + s1 + CH(e, f, g) + k[i] + w[i];
		s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a, 13) ^ RIGHTROTATE(a, 22);
		maj = (a & (b ^ c)) ^ (b & c);
		temp2 = s0 + maj;
		h = g;
		g = f;
		f = e;
		e = d + temp1;
		d = c;
		c = b;
		b = a;
		a = temp1 + temp2;
	}
	hA[0] += a;
	hA[1] += b;
	hA[2] += c;
	hA[3] += d;
	hA[4] += e;
	hA[5] += f;
	hA[6] += g;
	hA[7] += h;
	/* serialize the state big-endian into result */
	for (i = 0; i < 8; i++) {
		result[i * 4] = (hA[i] >> 24);
		result[i * 4 + 1] = (hA[i] >> 16);
		result[i * 4 + 2] = (hA[i] >> 8);
		result[i * 4 + 3] = hA[i];
	}
	return 0;
}
/* Share-wise right rotation by i bits (rotation is linear over shares). */
void mpc_RIGHTROTATE(uint32_t x[], int i, uint32_t z[]) {
	for (int party = 0; party < 3; party++) {
		z[party] = RIGHTROTATE(x[party], i);
	}
}
/* Share-wise logical right shift by i bits. */
void mpc_RIGHTSHIFT(uint32_t x[3], int i, uint32_t z[3]) {
	for (int party = 0; party < 3; party++) {
		z[party] = x[party] >> i;
	}
}
/* SHA-256 majority function on shares, using the identity
 * MAJ(a,b,c) = ((a^b) & (a^c)) ^ a, which needs only one mpc_AND. */
void mpc_MAJ(uint32_t a[], uint32_t b[3], uint32_t c[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
	uint32_t a_xor_b[3];
	uint32_t a_xor_c[3];
	mpc_XOR(a, b, a_xor_b);
	mpc_XOR(a, c, a_xor_c);
	mpc_AND(a_xor_b, a_xor_c, z, randomness, randCount, views, countY);
	mpc_XOR(z, a, z);
}
/* SHA-256 choose function on shares, using the identity
 * CH(e,f,g) = (e & (f^g)) ^ g, which needs only one mpc_AND. */
void mpc_CH(uint32_t e[], uint32_t f[3], uint32_t g[3], uint32_t z[3], unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
	uint32_t f_xor_g[3];
	mpc_XOR(f, g, f_xor_g);
	mpc_AND(e, f_xor_g, f_xor_g, randomness, randCount, views, countY);
	mpc_XOR(f_xor_g, g, z);
}
/* MPC evaluation of single-block SHA-256 over three secret shares.
 * inputs[i]: party i's share of the message; numBits: message bit length
 * (max 447). If addMsg is non-NULL, its words are added (mod 2^32) into
 * the first MSG_SIZE/4 schedule words before hashing -- presumably the
 * challenge-binding step of this protocol (TODO confirm against caller).
 * results[i] receives party i's 32-byte share of the digest. Randomness
 * consumption and view commitments mirror the mpc_* gate helpers. */
int mpc_sha256(unsigned char* results[3], unsigned char* inputs[3], unsigned char * addMsg, int numBits, unsigned char *randomness[3], int* randCount, View views[3], int* countY) {
if (numBits > 447) {
printf("Input too long, aborting!");
return -1;
}
int chars = numBits >> 3;
unsigned char* chunks[3];
uint32_t w[64][3];
uint32_t msg[MSG_SIZE/4];
/* pre-pack the public additional message into big-endian words */
if (addMsg)
{
for (int j=0;j<(numBits/32);j++)
{
msg[j] = (addMsg[j*4]<<24) | (addMsg[j*4+1]<<16) | (addMsg[j*4+2] << 8) | (addMsg[j*4+3]);
}
}
for (int i =0; i<64;i++)
{
w[i][0]=w[i][1]=w[i][2] = 0;
}
/* per-party padding and message-schedule load (bytes -> big-endian words) */
for (int i = 0; i < 3; i++) {
chunks[i] = calloc(64, 1); //512 bits
memcpy(chunks[i], inputs[i], chars);
chunks[i][chars] = 0x80;
//Last 8 chars used for storing length of input without padding, in big-endian.
//Since we only care for one block, we are safe with just using last 9 bits and 0'ing the rest
//chunk[60] = numBits >> 24;
//chunk[61] = numBits >> 16;
chunks[i][62] = numBits >> 8;
chunks[i][63] = numBits;
memcpy(views[i].x, chunks[i], 64);
for (int j = 0; j < 16; j++) {
w[j][i] = (chunks[i][j * 4] << 24) | (chunks[i][j * 4 + 1] << 16)
| (chunks[i][j * 4 + 2] << 8) | chunks[i][j * 4 + 3];
}
free(chunks[i]);
}
/* fold the public message into the shared schedule words */
if (addMsg)
{
for (int j=0;j<(MSG_SIZE/4);j++)
{
mpc_ADDK(w[j], msg[j], w[j], randomness, randCount, views, countY);
}
}
uint32_t s0[3], s1[3];
uint32_t t0[3], t1[3];
/* message-schedule expansion, words 16..63 */
for (int j = 16; j < 64; j++) {
//s0[i] = RIGHTROTATE(w[i][j-15],7) ^ RIGHTROTATE(w[i][j-15],18) ^ (w[i][j-15] >> 3);
mpc_RIGHTROTATE(w[j-15], 7, t0);
mpc_RIGHTROTATE(w[j-15], 18, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTSHIFT(w[j-15], 3, t1);
mpc_XOR(t0, t1, s0);
//s1[i] = RIGHTROTATE(w[i][j-2],17) ^ RIGHTROTATE(w[i][j-2],19) ^ (w[i][j-2] >> 10);
mpc_RIGHTROTATE(w[j-2], 17, t0);
mpc_RIGHTROTATE(w[j-2], 19, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTSHIFT(w[j-2], 10, t1);
mpc_XOR(t0, t1, s1);
//w[i][j] = w[i][j-16]+s0[i]+w[i][j-7]+s1[i];
mpc_ADD(w[j-16], s0, t1, randomness, randCount, views, countY);
mpc_ADD(w[j-7], t1, t1, randomness, randCount, views, countY);
mpc_ADD(t1, s1, w[j], randomness, randCount, views, countY);
}
/* working state: every party starts from the public IV */
uint32_t a[3] = { hA[0],hA[0],hA[0] };
uint32_t b[3] = { hA[1],hA[1],hA[1] };
uint32_t c[3] = { hA[2],hA[2],hA[2] };
uint32_t d[3] = { hA[3],hA[3],hA[3] };
uint32_t e[3] = { hA[4],hA[4],hA[4] };
uint32_t f[3] = { hA[5],hA[5],hA[5] };
uint32_t g[3] = { hA[6],hA[6],hA[6] };
uint32_t h[3] = { hA[7],hA[7],hA[7] };
uint32_t temp1[3], temp2[3], maj[3];
/* 64 compression rounds */
for (int i = 0; i < 64; i++) {
//s1 = RIGHTROTATE(e,6) ^ RIGHTROTATE(e,11) ^ RIGHTROTATE(e,25);
mpc_RIGHTROTATE(e, 6, t0);
mpc_RIGHTROTATE(e, 11, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTROTATE(e, 25, t1);
mpc_XOR(t0, t1, s1);
//ch = (e & f) ^ ((~e) & g);
//temp1 = h + s1 + CH(e,f,g) + k[i]+w[i];
//t0 = h + s1
mpc_ADD(h, s1, t0, randomness, randCount, views, countY);
mpc_CH(e, f, g, t1, randomness, randCount, views, countY);
//t1 = t0 + t1 (h+s1+ch)
mpc_ADD(t0, t1, t1, randomness, randCount, views, countY);
mpc_ADDK(t1, k[i], t1, randomness, randCount, views, countY);
mpc_ADD(t1, w[i], temp1, randomness, randCount, views, countY);
//s0 = RIGHTROTATE(a,2) ^ RIGHTROTATE(a,13) ^ RIGHTROTATE(a,22);
mpc_RIGHTROTATE(a, 2, t0);
mpc_RIGHTROTATE(a, 13, t1);
mpc_XOR(t0, t1, t0);
mpc_RIGHTROTATE(a, 22, t1);
mpc_XOR(t0, t1, s0);
mpc_MAJ(a, b, c, maj, randomness, randCount, views, countY);
//temp2 = s0+maj;
mpc_ADD(s0, maj, temp2, randomness, randCount, views, countY);
memcpy(h, g, sizeof(uint32_t) * 3);
memcpy(g, f, sizeof(uint32_t) * 3);
memcpy(f, e, sizeof(uint32_t) * 3);
//e = d+temp1;
mpc_ADD(d, temp1, e, randomness, randCount, views, countY);
memcpy(d, c, sizeof(uint32_t) * 3);
memcpy(c, b, sizeof(uint32_t) * 3);
memcpy(b, a, sizeof(uint32_t) * 3);
//a = temp1+temp2;
mpc_ADD(temp1, temp2, a, randomness, randCount, views, countY);
}
/* final feed-forward: add the working state into the IV, per party */
uint32_t hHa[8][3] = { { hA[0],hA[0],hA[0]  }, { hA[1],hA[1],hA[1] }, { hA[2],hA[2],hA[2] }, { hA[3],hA[3],hA[3] },
{ hA[4],hA[4],hA[4] }, { hA[5],hA[5],hA[5] }, { hA[6],hA[6],hA[6] }, { hA[7],hA[7],hA[7] } };
mpc_ADD(hHa[0], a, hHa[0], randomness, randCount, views, countY);
mpc_ADD(hHa[1], b, hHa[1], randomness, randCount, views, countY);
mpc_ADD(hHa[2], c, hHa[2], randomness, randCount, views, countY);
mpc_ADD(hHa[3], d, hHa[3], randomness, randCount, views, countY);
mpc_ADD(hHa[4], e, hHa[4], randomness, randCount, views, countY);
mpc_ADD(hHa[5], f, hHa[5], randomness, randCount, views, countY);
mpc_ADD(hHa[6], g, hHa[6], randomness, randCount, views, countY);
mpc_ADD(hHa[7], h, hHa[7], randomness, randCount, views, countY);
/* serialize each party's digest share big-endian */
for (int i = 0; i < 8; i++) {
mpc_RIGHTSHIFT(hHa[i], 24, t0);
results[0][i * 4] = t0[0];
results[1][i * 4] = t0[1];
results[2][i * 4] = t0[2];
mpc_RIGHTSHIFT(hHa[i], 16, t0);
results[0][i * 4 + 1] = t0[0];
results[1][i * 4 + 1] = t0[1];
results[2][i * 4 + 1] = t0[2];
mpc_RIGHTSHIFT(hHa[i], 8, t0);
results[0][i * 4 + 2] = t0[0];
results[1][i * 4 + 2] = t0[1];
results[2][i * 4 + 2] = t0[2];
results[0][i * 4 + 3] = hHa[i][0];
results[1][i * 4 + 3] = hHa[i][1];
results[2][i * 4 + 3] = hHa[i][2];
}
return 0;
}
/* Write numItems elements of size bytes each from data to filename.
 * Returns 0 on success, 1 on any failure (open, short write, or close).
 * BUG FIX: the original ignored the fwrite/fclose results, so a partial
 * or failed write was silently reported as success. */
int writeToFile(char filename[], void* data, int size, int numItems) {
	FILE *file = fopen(filename, "wb");
	if (!file) {
		printf("Unable to open file!");
		return 1;
	}
	size_t written = fwrite(data, size, numItems, file);
	int close_err = fclose(file);
	if (written != (size_t) numItems || close_err != 0) {
		return 1;
	}
	return 0;
}
/* XOR-split input (numBytes bytes) into three shares: output[0] and
 * output[1] are uniformly random, output[2] = input ^ output[0] ^ output[1].
 * Returns 0 on success, -1 if randomness generation fails.
 * BUG FIX: the original printed "aborting" on RAND_bytes failure but then
 * carried on with uninitialized share data; now it actually aborts. */
int secretShare(unsigned char* input, int numBytes, unsigned char output[3][numBytes]) {
	if (RAND_bytes(output[0], numBytes) != 1) {
		printf("RAND_bytes failed crypto, aborting\n");
		return -1;
	}
	if (RAND_bytes(output[1], numBytes) != 1) {
		printf("RAND_bytes failed crypto, aborting\n");
		return -1;
	}
	for (int j = 0; j < numBytes; j++) {
		output[2][j] = input[j] ^ output[0][j] ^ output[1][j];
	}
	return 0;
}
/* One ZKBoo round: run MPC-SHA256 over the three shares, append the
 * reconstructed digest words to each party's view, and return the
 * commitment struct holding each party's output share (a.yp).
 * randomness[i] is party i's pre-expanded random tape. The rs parameter
 * is unused here -- presumably the per-party commitment salts consumed
 * later by H(); verify against the caller. */
a commit(int numBytes, unsigned char shares[3][numBytes], unsigned char *randomness[3], unsigned char rs[3][4], View views[3]) {
unsigned char* inputs[3];
inputs[0] = shares[0];
inputs[1] = shares[1];
inputs[2] = shares[2];
unsigned char* hashes[3];
hashes[0] = malloc(32);
hashes[1] = malloc(32);
hashes[2] = malloc(32);
int* randCount = calloc(1, sizeof(int));
int* countY = calloc(1, sizeof(int));
*countY = 0;
mpc_sha256(hashes, inputs, NULL, numBytes * 8, randomness, randCount, views, countY);
//Explicitly add y to view
free(randCount);
/* pack each party's digest bytes into big-endian words appended to y */
for(int i = 0; i<8; i++) {
views[0].y[*countY] = (hashes[0][i * 4] << 24) | (hashes[0][i * 4 + 1] << 16)
| (hashes[0][i * 4 + 2] << 8) | hashes[0][i * 4 + 3];
views[1].y[*countY] = (hashes[1][i * 4] << 24) | (hashes[1][i * 4 + 1] << 16)
| (hashes[1][i * 4 + 2] << 8) | hashes[1][i * 4 + 3];
views[2].y[*countY] = (hashes[2][i * 4] << 24) | (hashes[2][i * 4 + 1] << 16)
| (hashes[2][i * 4 + 2] << 8) | hashes[2][i * 4 + 3];
*countY += 1;
}
free(countY);
free(hashes[0]);
free(hashes[1]);
free(hashes[2]);
/* extract each party's output share from its finished view */
uint32_t* result1 = malloc(32);
output(views[0], result1);
uint32_t* result2 = malloc(32);
output(views[1], result2);
uint32_t* result3 = malloc(32);
output(views[2], result3);
a a;
memcpy(a.yp[0], result1, 32);
memcpy(a.yp[1], result2, 32);
memcpy(a.yp[2], result3, 32);
free(result1);
free(result2);
free(result3);
return a;
}
/* Build the opening for challenge e: reveal the keys, views and salts of
 * parties e and e+1 (mod 3); party e+2 stays hidden. */
z prove(int e, unsigned char keys[3][16], unsigned char rs[3][4], View views[3]) {
	const int e1 = (e + 1) % 3;
	z proof;
	memcpy(proof.ke, keys[e], 16);
	memcpy(proof.ke1, keys[e1], 16);
	proof.ve = views[e];
	proof.ve1 = views[e1];
	memcpy(proof.re, rs[e], 4);
	memcpy(proof.re1, rs[e1], 4);
	return proof;
}
/* ZKBoo prover benchmark: reads the round count and challenge string from
 * argv, then runs the full commit/challenge/prove pipeline 100 times for
 * timing, finally writing the commitments and openings to out<N>.bin.
 * BUG FIXES vs the original:
 *  - zs was malloc'd on every one of the 100 loops but freed only once
 *    (leak of 99 * NUM_ROUNDS proofs); now freed before each realloc.
 *  - the timing printfs passed an unsigned long to %d (undefined
 *    behavior); now %lu.
 *  - NUM_ROUNDS is validated so the VLAs below cannot have size <= 0.
 *  - removed the unused local totalCrypto. */
int main(int argc, char * argv[]) {
	setbuf(stdout, NULL);
	srand((unsigned) time(NULL));
	init_EVP();
	openmp_thread_setup();
	char CHALLENGE[MSG_SIZE+1]; //55 is max length as we only support 447 bits = 55.875 bytes
	if (argc != 3)
	{
		printf("Usage: %s <number of rounds (e.g. 20, 40, 60, 80, 100)> <challenge (Max %d char)>\n",argv[0],MSG_SIZE);
		return -1;
	}
	NUM_ROUNDS = atoi(argv[1]);
	if (NUM_ROUNDS <= 0)
	{
		printf("Usage: %s <number of rounds (e.g. 20, 40, 60, 80, 100)> <challenge (Max %d char)>\n",argv[0],MSG_SIZE);
		return -1;
	}
	unsigned char garbage[4];
	if(RAND_bytes(garbage, 4) != 1) {
		printf("RAND_bytes failed crypto, aborting\n");
		return 0;
	}
	memset(CHALLENGE,0,MSG_SIZE+1);
	strncpy(CHALLENGE,argv[2],MSG_SIZE);
	int i = strlen(CHALLENGE);
	printf("Challenge length: %d\n", i);
	printf("Iterations of SHA: %d\n", NUM_ROUNDS);
	unsigned char input[MSG_SIZE];
	memset(input,0,sizeof(input));
	for(int j = 0; j<i; j++) {
		input[j] = CHALLENGE[j];
	}
	struct timeval begin, delta;
	gettimeofday(&begin,NULL);
	unsigned char rs[NUM_ROUNDS][3][4];
	unsigned char keys[NUM_ROUNDS][3][16];
	a as[NUM_ROUNDS];
	View localViews[NUM_ROUNDS][3];
	z* zs = NULL;
	/* repeat the whole proof 100 times purely for timing purposes */
	for(int loops=0;loops<100;loops++)
	{
		//Generating keys
		if(RAND_bytes((unsigned char *) keys, NUM_ROUNDS*3*16) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}
		if(RAND_bytes((unsigned char *)rs, NUM_ROUNDS*3*4) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}
		//Sharing secrets: shares 0 and 1 random, share 2 completes the XOR
		unsigned char shares[NUM_ROUNDS][3][i];
		if(RAND_bytes((unsigned char *)shares, NUM_ROUNDS*3*i) != 1) {
			printf("RAND_bytes failed crypto, aborting\n");
			return 0;
		}
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			for (int j = 0; j < i; j++) {
				shares[k][2][j] = input[j] ^ shares[k][0][j] ^ shares[k][1][j];
			}
		}
		//Generating randomness: expand each party key into its random tape
		unsigned char *randomness[NUM_ROUNDS][3];
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			for(int j = 0; j<3; j++) {
				randomness[k][j] = malloc(2912*sizeof(unsigned char));
				getAllRandomness(keys[k][j], randomness[k][j]);
			}
		}
		//Running MPC-SHA2
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			as[k] = commit(i, shares[k], randomness[k], rs[k], localViews[k]);
			for(int j=0; j<3; j++) {
				free(randomness[k][j]);
			}
		}
		//Committing: hash each party's (key, view, salt) triple
		#pragma omp parallel for
		for(int k=0; k<NUM_ROUNDS; k++) {
			unsigned char hash1[SHA256_DIGEST_LENGTH];
			memset(hash1,0,sizeof(hash1));
			H(keys[k][0], localViews[k][0], rs[k][0], hash1);
			memcpy(as[k].h[0], &hash1, 32);
			H(keys[k][1], localViews[k][1], rs[k][1], hash1);
			memcpy(as[k].h[1], &hash1, 32);
			H(keys[k][2], localViews[k][2], rs[k][2], hash1);
			memcpy(as[k].h[2], &hash1, 32);
		}
		//Generating E: Fiat-Shamir challenges from the reconstructed output
		int es[NUM_ROUNDS];
		uint32_t finalHash[8];
		for (int j = 0; j < 8; j++) {
			finalHash[j] = as[0].yp[0][j]^as[0].yp[1][j]^as[0].yp[2][j];
		}
		printf("output H(Challenge) = ");
		/* NOTE(review): %02X prints variable-width hex per 32-bit word;
		 * %08X was probably intended, but the output format is kept. */
		for (int m = 0; m < 8; m++)
		{
			printf("%02X",finalHash[m]);
		}
		printf("\n");
		H3(finalHash, as, NUM_ROUNDS, es);
		//Packing Z
		free(zs); /* release the previous iteration's proofs (leak fix) */
		zs = malloc(sizeof(z)*NUM_ROUNDS);
		#pragma omp parallel for
		for(int m = 0; m<NUM_ROUNDS; m++) {
			zs[m] = prove(es[m],keys[m],rs[m], localViews[m]);
		}
	}
	gettimeofday(&delta,NULL);
	unsigned long inMilli = (delta.tv_sec - begin.tv_sec)*1000000 + (delta.tv_usec - begin.tv_usec);
	inMilli /= 1000;
	//Writing to file
	FILE *file;
	char outputFile[3*sizeof(int) + 8];
	sprintf(outputFile, "out%i.bin", NUM_ROUNDS);
	file = fopen(outputFile, "wb");
	if (!file) {
		printf("Unable to open file!");
		return 1;
	}
	fwrite(as, sizeof(a), NUM_ROUNDS, file);
	fwrite(zs, sizeof(z), NUM_ROUNDS, file);
	fclose(file);
	free(zs);
	printf("Total time taken for 100 loops: %lu mili-seconds\n",inMilli);
	printf("Time taken for 1 loops: %lu mili-seconds\n",inMilli/100);
	printf("\n");
	printf("Proof output to file %s", outputFile);
	openmp_thread_cleanup();
	cleanup_EVP();
	return EXIT_SUCCESS;
}
|
wvt_relax.c | #include "globals.h"
#include "tree.h"
#include "kernel.h"
#ifdef OUTPUT_DIAGNOSTICS
#include "diagnostics.h"
#endif
#include "redistribution.h"
#define WVTNNGB DESNNGB
void writeStepFile ( int it );
/* Settle SPH particle with weighted Voronoi tesselations (Diehl+ 2012).
* Here hsml is not the SPH smoothing length, but is related to a local
* metric defined ultimately by the density model.
* Relaxation is done in units of the boxsize, hence the box volume is 1
* Return code true means that rerun could be usefull */
void Regularise_sph_particles()
{
const int nPart = Param.Npart;
const double boxsize[3] = { Problem.Boxsize[0], Problem.Boxsize[1],
Problem.Boxsize[2]
};
const double boxhalf[3] = { boxsize[0] / 2, boxsize[1] / 2, boxsize[2] / 2, };
const double median_boxsize = fmax ( boxsize[1], boxsize[2] ); // boxsize[0] is largest
printf ( "Starting iterative SPH regularisation \n"
" Maxiter=%d, MpsFraction=%g StepReduction=%g LimitMps=(%g,%g,%g,%g)\n\n",
Param.Maxiter, Param.MpsFraction, Param.StepReduction, Param.LimitMps[0], Param.LimitMps[1], Param.LimitMps[2], Param.LimitMps[3] );
fflush ( stdout );
float *hsml = NULL;
size_t nBytes = nPart * sizeof ( *hsml );
hsml = Malloc ( nBytes );
float *delta[3] = { NULL };
nBytes = nPart * sizeof ( **delta );
delta[0] = Malloc ( nBytes );
delta[1] = Malloc ( nBytes );
delta[2] = Malloc ( nBytes );
int it = 0;
#ifdef TWO_DIM
double npart_1D = pow ( nPart, 1.0 / 2.0 );
#else
double npart_1D = pow ( nPart, 1.0 / 3.0 );
#endif
double step = 1.0 / ( npart_1D * Param.MpsFraction );
double errLast = DBL_MAX;
double errDiff = DBL_MAX;
const double volume = Problem.Boxsize[0] * Problem.Boxsize[1] * Problem.Boxsize[2];
const double rho_mean = nPart * Problem.Mpart / volume;
double last_cnt = DBL_MAX;
#ifdef OUTPUT_DIAGNOSTICS
initIterationDiagnostics();
#endif
for ( ;; ) {
Find_sph_quantities();
if ( it++ > Param.Maxiter ) {
printf ( "Max iterations reached, result might not be converged properly.\n" );
break;
}
#ifdef SAVE_WVT_STEPS
writeStepFile ( it );
#endif
resetRedistributionFlags();
if ( it <= Param.LastMoveStep && it % Param.RedistributionFrequency == 0 ) {
const int firstIt = 1;
const double amplitude = Param.MoveFractionMax;
const double decay = log ( Param.MoveFractionMax / Param.MoveFractionMin ) / ( Param.LastMoveStep / Param.RedistributionFrequency - firstIt );
const double moveFraction = amplitude * exp ( -decay * ( it / Param.RedistributionFrequency - firstIt ) );
const int movePart = Param.Npart * moveFraction;
const int maxProbes = Param.Npart * Param.ProbesFraction * moveFraction / Param.MoveFractionMax;
redistributeParticles ( movePart, maxProbes );
Find_sph_quantities();
}
double errMin = DBL_MAX, errMax = 0, errMean = 0, errSigma = 0.;
#pragma omp parallel for reduction(+:errMean,errSigma) reduction(max:errMax) reduction(min:errMin)
for ( int ipart = 0; ipart < nPart; ipart++ ) { // get error
const float err = relativeDensityError ( ipart );
errMin = fmin ( err, errMin );
errMax = fmax ( err, errMax );
errMean += err;
errSigma += err * err;
}
errMean /= nPart;
errSigma = sqrt ( errSigma / nPart - p2 ( errMean ) );
errDiff = ( errLast - errMean ) / errMean;
printf ( " #%02d: Err min=%3g max=%3g mean=%03g sigma=%03g diff=%03g step=%g\n", it, errMin, errMax, errMean, errSigma, errDiff, step );
errLast = errMean;
double vSphSum = 0; // total volume defined by hsml
double max_hsml = 0;
#pragma omp parallel for shared(hsml) reduction(+:vSphSum,max_hsml)
for ( int ipart = 0; ipart < nPart; ipart++ ) { // find hsml
float rho = ( *Density_Func_Ptr ) ( ipart, Param.BiasCorrection );
SphP[ipart].Rho_Model = rho;
#ifdef TWO_DIM
hsml[ipart] = pow ( WVTNNGB * Problem.Mpart / rho / pi, 1. / 2. );
vSphSum += p2 ( hsml[ipart] );
#else
hsml[ipart] = pow ( WVTNNGB * Problem.Mpart / rho / fourpithird, 1. / 3. );
vSphSum += p3 ( hsml[ipart] );
#endif
max_hsml = max ( max_hsml, hsml[ipart] );
}
#ifdef TWO_DIM
float norm_hsml = pow ( WVTNNGB / vSphSum / pi , 1.0 / 2.0 ) * median_boxsize;
#else
float norm_hsml = pow ( WVTNNGB / vSphSum / fourpithird , 1.0 / 3.0 ) * median_boxsize;
#endif
#pragma omp parallel for
for ( int ipart = 0; ipart < nPart; ipart++ ) {
hsml[ipart] *= norm_hsml;
}
#pragma omp parallel for shared(delta, hsml, P) schedule(dynamic, nPart/Omp.NThreads/256)
for ( int ipart = 0; ipart < nPart; ipart++ ) {
delta[0][ipart] = delta[1][ipart] = delta[2][ipart] = 0;
int ngblist[NGBMAX] = { 0 };
int ngbcnt = Find_ngb ( ipart, hsml[ipart], ngblist );
for ( int i = 0; i < ngbcnt; i++ ) { // neighbour loop
int jpart = ngblist[i];
if ( ipart == jpart ) {
continue;
}
double d[3];
double r2 = 0.0;
for ( int p = 0; p < 3; ++p ) {
d[p] = P[ipart].Pos[p] - P[jpart].Pos[p];
if ( Problem.Periodic[p] ) {
while ( d[p] > boxhalf[p] ) { // find closest image
d[p] -= boxsize[p];
}
while ( d[p] < -boxhalf[p] ) {
d[p] += boxsize[p];
}
}
r2 += d[p] * d[p];
}
Assert ( r2 > 0,
"Found two particles %d & %d at the same location. "
"Consider increasing the space between your density field"
" and the box boundaries.", ipart, jpart );
float h = 0.5 * ( hsml[ipart] + hsml[jpart] );
if ( r2 > p2 ( h ) ) {
continue ;
}
float r = sqrt ( r2 );
// * p3(h) is a for legacy reasons - at some point retune the code to work without it
// norm_hsml also plays a minor role with that
#ifdef TWO_DIM
double kernel_fac = p2 ( h );
#else
double kernel_fac = p3 ( h );
#endif
float wk = sph_kernel ( r, h ) * kernel_fac;
delta[0][ipart] += step * h * wk * d[0] / r;
delta[1][ipart] += step * h * wk * d[1] / r;
#ifndef TWO_DIM
delta[2][ipart] += step * h * wk * d[2] / r;
#endif
}
}
int cnt = 0, cnt1 = 0, cnt2 = 0, cnt3 = 0;
#pragma omp parallel for shared(delta,P) reduction(+:cnt,cnt1,cnt2,cnt3)
for ( int ipart = 0; ipart < nPart; ipart++ ) { // move particles
const float d = sqrt ( p2 ( delta[0][ipart] ) + p2 ( delta[1][ipart] ) + p2 ( delta[2][ipart] ) );
const float h = SphP[ipart].Hsml;
#ifdef TWO_DIM
float d_mps = pow ( pi * p2 ( h ) / DESNNGB, 1.0 / 3.0 );
#else
float d_mps = pow ( fourpithird * p3 ( h ) / DESNNGB, 1.0 / 3.0 );
#endif
if ( d > 1 * d_mps ) {
++cnt;
}
if ( d > 0.1 * d_mps ) {
++cnt1;
}
if ( d > 0.01 * d_mps ) {
++cnt2;
}
if ( d > 0.001 * d_mps ) {
++cnt3;
}
P[ipart].Pos[0] += delta[0][ipart]; // push !
P[ipart].Pos[1] += delta[1][ipart];
P[ipart].Pos[2] += delta[2][ipart];
while ( P[ipart].Pos[0] < 0 ) { // keep it in the box
P[ipart].Pos[0] += boxsize[0];
}
while ( P[ipart].Pos[0] > boxsize[0] ) {
P[ipart].Pos[0] -= boxsize[0];
}
while ( P[ipart].Pos[1] < 0 ) {
P[ipart].Pos[1] += boxsize[1];
}
while ( P[ipart].Pos[1] > boxsize[1] ) {
P[ipart].Pos[1] -= boxsize[1];
}
while ( P[ipart].Pos[2] < 0 ) {
P[ipart].Pos[2] += boxsize[2];
}
while ( P[ipart].Pos[2] > boxsize[2] ) {
P[ipart].Pos[2] -= boxsize[2];
}
}
double moveMps[4];
moveMps[0] = cnt * 100. / Param.Npart;
moveMps[1] = cnt1 * 100. / Param.Npart;
moveMps[2] = cnt2 * 100. / Param.Npart;
moveMps[3] = cnt3 * 100. / Param.Npart;
printf ( " Del %g%% > Dmps; %g%% > Dmps/10; %g%% > Dmps/100; %g%% > Dmps/1000\n",
moveMps[0], moveMps[1], moveMps[2], moveMps[3] );
if ( it == 1 ) {
if ( moveMps[0] < 10. ) {
fprintf ( stderr, "WARNING: Hardly any initial movement detected. Consider decreasing MpsFraction in the parameter file!\n" );
fflush ( stderr );
} else if ( moveMps[0] > 80. ) {
fprintf ( stderr, "WARNING: A lot of initial movement detected. Consider increasing MpsFraction in the parameter file!\n" );
fflush ( stderr );
}
}
#ifdef OUTPUT_DIAGNOSTICS
struct Quadruplet errorQuad;
errorQuad.min = errMin;
errorQuad.max = errMax;
errorQuad.mean = errMean;
errorQuad.sigma = errSigma;
const struct Quadruplet deltaQuad = calculateStatsOn ( delta, Param.Npart );
writeIterationDiagnostics ( it, &errorQuad, errDiff, moveMps, &deltaQuad );
#endif
if ( ( moveMps[0] < Param.LimitMps[0] )
|| ( moveMps[1] < Param.LimitMps[1] )
|| ( moveMps[2] < Param.LimitMps[2] )
|| ( moveMps[3] < Param.LimitMps[3] ) ) {
break;
}
// force convergence if distribution doesnt tighten
if ( cnt1 > last_cnt && ( it > Param.LastMoveStep || it % Param.RedistributionFrequency != 0 ) ) {
step *= Param.StepReduction;
}
last_cnt = cnt1;
fflush ( stdout );
}
Free ( hsml );
Free ( delta[0] );
Free ( delta[1] );
Free ( delta[2] );
printf ( "done\n\n" );
fflush ( stdout );
return ;
}
/* Write the current particle distribution to a snapshot named
 * "<Problem.Name>_<it>" (zero-padded step number), restoring the
 * original Problem.Name afterwards so later output is unaffected. */
void writeStepFile ( int it )
{
    char problem_name[CHARBUFSIZE] = "";
    char wvt_stepname[CHARBUFSIZE] = "";

    // remember the original name so it can be restored after the write;
    // snprintf bounds the copy instead of the previous unbounded sprintf
    snprintf ( problem_name, sizeof ( problem_name ), "%s", Problem.Name );

    // compose "<name>_<step>" in one bounded call instead of
    // strcpy + sprintf + strcat into three separate buffers
    snprintf ( wvt_stepname, sizeof ( wvt_stepname ), "%s_%03d", problem_name, it );

    // Problem.Name's capacity is declared elsewhere, so keep the original
    // sprintf here; the source string is already bounded to CHARBUFSIZE
    sprintf ( Problem.Name, "%s", wvt_stepname );

    printf ( "Writing file %s\n", Problem.Name );

    Write_output ( 0 ); // not verbose

    sprintf ( Problem.Name, "%s", problem_name ); // restore original name
}
|
test.c | #include <stdlib.h>
#include <stdio.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define N 100
int main ()
{
int a[N], b[N], c[N];
check_offloading();
long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
if (!cpuExec) {
// Test: no clauses
int fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: private, firstprivate, lastprivate, linear
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
int q = -5;
int p = -3;
int r = 0;
int l = 10;
#pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a, r) map(to: b,c)
#pragma omp parallel
#pragma omp for simd private(q) firstprivate(p) lastprivate(r) linear(l:2)
for (int i = 0 ; i < N ; i++) {
q = i + 5;
p += i + 2;
a[i] += p*b[i] + c[i]*q +l;
r = i;
}
for (int i = 0 ; i < N ; i++) {
int expected = (-1 + (-3 + i + 2)*i + (2*i)*(i + 5) + 10+(2*i));
if (a[i] != expected) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], expected);
fail = 1;
}
}
if (r != N-1) {
printf("Error for lastprivate: device = %d, host = %d\n", r, N-1);
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target teams num_teams(1) thread_limit(1024) map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: static)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
int ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule static chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: static, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dyanmic no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// hangs
#if 0
// Test: schedule dyanmic no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dyanmic no chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule dyanmic no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: dynamic)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule dynamic chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule dynamic chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule dynamic chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: dynamic, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided no chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule guided no chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided no chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule guided no chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: guided)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided chunk
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule guided chunk, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule guided chunk, nonmonotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(nonmonotonic: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule guided chunk, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
ch = 10;
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: guided, ch)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule auto, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: auto)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: schedule runtime
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#if 0
// Test: schedule runtime, monotonic
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(monotonic: runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
#endif
// Test: schedule runtime, simd
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd schedule(simd: runtime)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: collapse
fail = 0;
int ma[N][N], mb[N][N], mc[N][N];
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++) {
ma[i][j] = -1;
mb[i][j] = i;
mc[i][j] = 2*i;
}
#pragma omp target map(tofrom: ma) map(to: mb,mc)
#pragma omp parallel
#pragma omp for simd collapse(2)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
ma[i][j] += mb[i][j] + mc[i][j];
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (ma[i][j] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, ma[i][j], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: ordered
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd ordered
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: nowait
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd nowait
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: safelen
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd safelen(16)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: simdlen
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd simdlen(16)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
// Test: aligned
fail = 0;
for (int i = 0 ; i < N ; i++) {
a[i] = -1;
b[i] = i;
c[i] = 2*i;
}
#pragma omp target map(tofrom: a) map(to: b,c)
#pragma omp parallel
#pragma omp for simd aligned(a,b,c:8)
for (int i = 0 ; i < N ; i++)
a[i] += b[i] + c[i];
for (int i = 0 ; i < N ; i++)
if (a[i] != (-1 + i + 2*i)) {
printf("Error at %d: device = %d, host = %d\n", i, a[i], (-1 + i + 2*i));
fail = 1;
}
if (fail)
printf ("Failed\n");
else
printf("Succeeded\n");
} else {
DUMP_SUCCESS(27);
}
return 0;
}
|
ast-dump-openmp-begin-declare-variant_9.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s --check-prefix=C
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s --check-prefix=CXX
// expected-no-diagnostics
int also_before(void) {
return 0;
}
#pragma omp begin declare variant match(implementation={vendor(llvm)})
int also_after(void) {
return 1;
}
int also_before(void) {
return 2;
}
#pragma omp end declare variant
int also_after(void) {
return 0;
}
void foo();
typedef int(*fd)(void);
int main() {
// Should return 0.
fd fns[2];
fns[0] = &also_before;
fns[1] = also_after;
return (foo(), also_after)() +
(fns[0])() +
(1[fns])();
}
// Make sure:
// - we see the specialization in the AST
// - we pick the right callees
// C: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// C-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// C-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// C-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// C-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// C-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// C-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// C-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// C-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// C-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// C-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// C-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// C-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:10> col:6 used foo 'void ({{.*}})'
// C-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// C-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// C-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// C-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// C-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// C-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// C-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:12, line:32:1>
// C-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// C-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// C-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// C-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// C-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// C-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' '='
// C-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// C-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// C-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// C-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// C-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// C-NEXT: | | `-ParenExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})'
// C-NEXT: | | `-BinaryOperator [[ADDR_51:0x[a-z0-9]*]] <col:11, col:18> 'int (*)({{.*}})' ','
// C-NEXT: | | |-CallExpr [[ADDR_52:0x[a-z0-9]*]] <col:11, col:15> 'void'
// C-NEXT: | | | `-ImplicitCastExpr [[ADDR_53:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | | `-DeclRefExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// C-NEXT: | | `-ImplicitCastExpr [[ADDR_55:0x[a-z0-9]*]] <col:18> 'int (*)({{.*}})' <FunctionToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// C-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// C-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// C-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// C-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// C-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// C-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// C-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// C-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// C-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// C-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})'
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1
// CXX-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(llvm)}] 'int ({{.*}})'
// CXX-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1>
// CXX-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10>
// CXX-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2
// CXX-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})'
// CXX-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1>
// CXX-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10>
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0
// CXX-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(llvm)}
// CXX-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(llvm)}]' 'int ({{.*}})'
// CXX-NEXT: |-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, col:10> col:6 used foo 'void ({{.*}})'
// CXX-NEXT: |-TypedefDecl [[ADDR_23:0x[a-z0-9]*]] <line:23:1, col:22> col:14 referenced fd 'int (*)({{.*}})'
// CXX-NEXT: | `-PointerType [[ADDR_24:0x[a-z0-9]*]] 'int (*)({{.*}})'
// CXX-NEXT: | `-ParenType [[ADDR_25:0x[a-z0-9]*]] 'int ({{.*}})' sugar
// CXX-NEXT: | `-FunctionProtoType [[ADDR_26:0x[a-z0-9]*]] 'int ({{.*}})' cdecl
// CXX-NEXT: | `-BuiltinType [[ADDR_27:0x[a-z0-9]*]] 'int'
// CXX-NEXT: `-FunctionDecl [[ADDR_28:0x[a-z0-9]*]] <line:24:1, line:32:1> line:24:5 main 'int ({{.*}})'
// CXX-NEXT: `-CompoundStmt [[ADDR_29:0x[a-z0-9]*]] <col:12, line:32:1>
// CXX-NEXT: |-DeclStmt [[ADDR_30:0x[a-z0-9]*]] <line:26:3, col:12>
// CXX-NEXT: | `-VarDecl [[ADDR_31:0x[a-z0-9]*]] <col:3, col:11> col:6 used fns 'fd[2]'
// CXX-NEXT: |-BinaryOperator [[ADDR_32:0x[a-z0-9]*]] <line:27:3, col:13> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_33:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_34:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_35:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_36:0x[a-z0-9]*]] <col:7> 'int' 0
// CXX-NEXT: | `-UnaryOperator [[ADDR_37:0x[a-z0-9]*]] <col:12, col:13> 'int (*)({{.*}})' prefix '&' cannot overflow
// CXX-NEXT: | `-DeclRefExpr [[ADDR_38:0x[a-z0-9]*]] <col:13> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
// CXX-NEXT: |-BinaryOperator [[ADDR_39:0x[a-z0-9]*]] <line:28:3, col:12> 'fd':'int (*)({{.*}})' {{.*}}'='
// CXX-NEXT: | |-ArraySubscriptExpr [[ADDR_40:0x[a-z0-9]*]] <col:3, col:8> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | | |-ImplicitCastExpr [[ADDR_41:0x[a-z0-9]*]] <col:3> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_42:0x[a-z0-9]*]] <col:3> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | | `-IntegerLiteral [[ADDR_43:0x[a-z0-9]*]] <col:7> 'int' 1
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_44:0x[a-z0-9]*]] <col:12> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | `-DeclRefExpr [[ADDR_45:0x[a-z0-9]*]] <col:12> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: `-ReturnStmt [[ADDR_46:0x[a-z0-9]*]] <line:29:3, line:31:19>
// CXX-NEXT: `-BinaryOperator [[ADDR_47:0x[a-z0-9]*]] <line:29:10, line:31:19> 'int' '+'
// CXX-NEXT: |-BinaryOperator [[ADDR_48:0x[a-z0-9]*]] <line:29:10, line:30:19> 'int' '+'
// CXX-NEXT: | |-CallExpr [[ADDR_49:0x[a-z0-9]*]] <line:29:10, col:30> 'int'
// CXX-NEXT: | | `-ImplicitCastExpr [[ADDR_50:0x[a-z0-9]*]] <col:10, col:28> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | `-ParenExpr [[ADDR_51:0x[a-z0-9]*]] <col:10, col:28> 'int ({{.*}})' lvalue
// CXX-NEXT: | | `-BinaryOperator [[ADDR_52:0x[a-z0-9]*]] <col:11, col:18> 'int ({{.*}})' {{.*}}','
// CXX-NEXT: | | |-CallExpr [[ADDR_53:0x[a-z0-9]*]] <col:11, col:15> 'void'
// CXX-NEXT: | | | `-ImplicitCastExpr [[ADDR_54:0x[a-z0-9]*]] <col:11> 'void (*)({{.*}})' <FunctionToPointerDecay>
// CXX-NEXT: | | | `-DeclRefExpr [[ADDR_55:0x[a-z0-9]*]] <col:11> 'void ({{.*}})' {{.*}}Function [[ADDR_22]] 'foo' 'void ({{.*}})'
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_56:0x[a-z0-9]*]] <col:18> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})'
// CXX-NEXT: | `-CallExpr [[ADDR_57:0x[a-z0-9]*]] <line:30:10, col:19> 'int'
// CXX-NEXT: | `-ImplicitCastExpr [[ADDR_58:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: | `-ParenExpr [[ADDR_59:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | `-ArraySubscriptExpr [[ADDR_60:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: | |-ImplicitCastExpr [[ADDR_61:0x[a-z0-9]*]] <col:11> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: | | `-DeclRefExpr [[ADDR_62:0x[a-z0-9]*]] <col:11> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
// CXX-NEXT: | `-IntegerLiteral [[ADDR_63:0x[a-z0-9]*]] <col:15> 'int' 0
// CXX-NEXT: `-CallExpr [[ADDR_64:0x[a-z0-9]*]] <line:31:10, col:19> 'int'
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_65:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' <LValueToRValue>
// CXX-NEXT: `-ParenExpr [[ADDR_66:0x[a-z0-9]*]] <col:10, col:17> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: `-ArraySubscriptExpr [[ADDR_67:0x[a-z0-9]*]] <col:11, col:16> 'fd':'int (*)({{.*}})' lvalue
// CXX-NEXT: |-IntegerLiteral [[ADDR_68:0x[a-z0-9]*]] <col:11> 'int' 1
// CXX-NEXT: `-ImplicitCastExpr [[ADDR_69:0x[a-z0-9]*]] <col:13> 'fd *' <ArrayToPointerDecay>
// CXX-NEXT: `-DeclRefExpr [[ADDR_70:0x[a-z0-9]*]] <col:13> 'fd[2]' {{.*}}Var [[ADDR_31]] 'fns' 'fd[2]'
|
mat_mul_p4a_10000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
void mat_mul(int *a, int *b, int *c)
{
   /* Auto-generated (Par4All) benchmark kernel over 10000x10000 int
    * matrices stored row-major in flat arrays.
    * NOTE(review): b is indexed b[j*10000+k], i.e. B is read as if
    * transposed, and the innermost t-loop re-adds the identical product
    * 100 times, so c = 100 * (A * B^T) rather than plain A*B -- presumably
    * intentional load amplification in the generated benchmark; confirm
    * against the generator before "fixing". */
   int i, j, k, t;
   /* each outer iteration writes a disjoint row of c, so only the inner
    * indices need privatizing */
#pragma omp parallel for private(j, t, k)
   for(i = 0; i <= 9999; i += 1)
      for(j = 0; j <= 9999; j += 1) {
         c[i*10000+j] = 0;
         for(k = 0; k <= 9999; k += 1)
            for(t = 0; t <= 99; t += 1)
               c[i*10000+j] += a[i*10000+k]*b[j*10000+k];
      }
   return;
}
|
mandelbrot.c | /*
* mandelbrot.c: adaptive anti-aliasing
* (c)2010-2018 Seiji Nishimura
* $Id: mandelbrot.c,v 1.1.1.3 2018/09/11 00:00:00 seiji Exp seiji $
*/
#ifdef USE_MPI
#include <mpi.h>
#endif
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <pixmap.h>
#include <palette.h>
#include <stdbool.h>
// for adaptive anti-aliasing
#define MIN_SAMPLES (0x01<<4)
#define MAX_SAMPLES (0x01<<16)
#define ROUND(x) ((int) round(x))
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))
// uniform RNG for [0:1)
#define SRAND(s) srand(s)
#define DRAND() ((double) rand()/(RAND_MAX+1.0))
// prototypes
void colormap_init (pixel_t *, int);
void jitter_init (double *, double *);
void draw_image (pixmap_t *, pixmap_t *, pixel_t *,
int, double, double, double, double *, double *, int, int);
void rough_sketch (pixmap_t *, pixel_t *, int, double, double, double, int, int);
void pixmap_reduction(pixmap_t *, int, int);
int mandelbrot (int, double, double);
bool detect_edge (pixmap_t *, pixel_t *, int, int);
bool equivalent_color(pixel_t, pixel_t);
//======================================================================
int main(int argc, char **argv)
{
    // Driver: build colormap and jitter tables, render the adaptively
    // anti-aliased Mandelbrot image, and write it as a PPM file.
    int nprocs = 1, myrank = 0;        // defaults for the non-MPI build
    pixmap_t image, sketch;
    pixel_t colormap[ITER_MAX];
    double dx[MAX_SAMPLES],
           dy[MAX_SAMPLES];            // shared sub-pixel jitter tables

#ifdef USE_MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#endif

    pixmap_create(&image , WIDTH, HEIGHT);
    pixmap_create(&sketch, WIDTH, HEIGHT);
    colormap_init(colormap, ITER_MAX);
    jitter_init(dx, dy);

    // sketch holds the one-sample-per-pixel draft; image gets the
    // anti-aliased result
    draw_image(&image, &sketch, colormap,
               ITER_MAX, CENTER_R, CENTER_I, RADIUS, dx, dy, nprocs, myrank);

    if (myrank == 0)                   // after reduction every rank has the
        pixmap_write_ppmfile(&image, "output.ppm");  // image; rank 0 writes

    pixmap_destroy(&sketch);
    pixmap_destroy(&image );

#ifdef USE_MPI
    MPI_Finalize();
#endif

    return EXIT_SUCCESS;
}
//----------------------------------------------------------------------
void colormap_init(pixel_t *colormap, int iter_max)
{   // Entry 0 is black (points that never escape map to it); entries
    // 1..iter_max-1 cycle through the palette with period COLORMAP_CYCLE.
    const int mask = COLORMAP_CYCLE - 1;

    colormap[0] = pixel_set_rgb(0x00, 0x00, 0x00);

    for (int k = 1; k < iter_max; k++) {
        int phase = k & mask;           // position within one palette cycle
#ifdef REVERSE_COLORMAP
        phase = mask - phase;           // run the palette backwards
#endif
        colormap[k] = palette(COLORMAP_TYPE, 0x00, mask, phase);
    }
    return;
}
//----------------------------------------------------------------------
void jitter_init(double *dx, double *dy)
{   // Fill both jitter tables with uniform samples in [0,1); the RNG is
    // seeded from the wall clock, so each run gets a fresh sample pattern.
    SRAND((int) time(NULL));
    for (int i = 0; i < MAX_SAMPLES; i++) {
        dx[i] = DRAND();                // x offset first, then y: keep this
        dy[i] = DRAND();                // call order for RNG reproducibility
    }
    return;
}
//----------------------------------------------------------------------
// Render the final image: keep the rough sketch's color where the field is
// locally flat, and Monte-Carlo super-sample only at detected edges
// (adaptive anti-aliasing). Pixels are dealt out cyclically over MPI ranks
// and OpenMP threads.
void draw_image(pixmap_t *image, pixmap_t *sketch, pixel_t *colormap, int iter_max,
	double c_r, double c_i, double radius, double *dx, double *dy, int nprocs, int myrank)
{				// adaptive anti-aliasing
    int iter_mask = iter_max - 1;   // iter==iter_max then maps to
                                    // colormap[0] (black) when iter_max
                                    // is a power of two
    int width, height;
    double d;

    pixmap_get_size(image, &width, &height);
    d = 2.0 * radius / MIN(width, height);  // complex-plane step per pixel
    rough_sketch(sketch, colormap, iter_max, c_r, c_i, radius, nprocs, myrank);
#pragma omp parallel for schedule(static,1)
    for (int xy = myrank; xy < width * height; xy += nprocs) {
	int x = xy % width,
	    y = xy / width;
	pixel_t pixel;
	if (detect_edge(sketch, &pixel, x, y)) {
	    pixel_t average = pixel;
	    int sum_r, sum_g, sum_b,
		m = 1, n = MIN_SAMPLES;
	    // channel sums are seeded with the sketch's own sample
	    sum_r = pixel_get_r(pixel);
	    sum_g = pixel_get_g(pixel);
	    sum_b = pixel_get_b(pixel);
	    do {
		pixel = average;
		for (int k = m; k < n; k++) { // pixel refinement with MC integration
		    double p_r = c_r + d * ((x + dx[k]) - width / 2),
			   p_i = c_i + d * (height / 2 - (y + dy[k]));
		    int iter = mandelbrot(iter_max, p_r, p_i);
		    sum_r += pixel_get_r(colormap[iter & iter_mask]);
		    sum_g += pixel_get_g(colormap[iter & iter_mask]);
		    sum_b += pixel_get_b(colormap[iter & iter_mask]);
		}
		average = pixel_set_rgb(ROUND((double) sum_r / n),
					ROUND((double) sum_g / n),
					ROUND((double) sum_b / n));
	    // keep doubling the sample count (m..n-1 are the new samples)
	    // until the average stops changing perceptibly or MAX_SAMPLES
	    // is reached
	    } while (!equivalent_color(average, pixel) &&
		     (n = (m = n) << 0x01) <= MAX_SAMPLES);
	    pixel = average;
	}
	pixmap_put_pixel(image, pixel, x, y);
    }
    pixmap_reduction(image, nprocs, myrank);
    return;
}
//----------------------------------------------------------------------
void rough_sketch(pixmap_t *sketch, pixel_t *colormap, int iter_max,
		  double c_r, double c_i, double radius, int nprocs, int myrank)
{   // Draft pass: one un-jittered sample per pixel, used later by
    // draw_image for edge detection. Pixels are distributed cyclically
    // over MPI ranks and OpenMP threads.
    const int mask = iter_max - 1;
    int w, h;

    pixmap_get_size(sketch, &w, &h);
    const double step = 2.0 * radius / MIN(w, h);  // plane units per pixel

#pragma omp parallel for schedule(static,1)
    for (int idx = myrank; idx < w * h; idx += nprocs) {
	const int px = idx % w;
	const int py = idx / w;
	const double re = c_r + step * (px - w / 2);
	const double im = c_i + step * (h / 2 - py);
	const int count = mandelbrot(iter_max, re, im);
	pixmap_put_pixel(sketch, colormap[count & mask], px, py);
    }
    pixmap_reduction(sketch, nprocs, myrank);
    return;
}
//----------------------------------------------------------------------
void pixmap_reduction(pixmap_t *pixmap, int nprocs, int myrank)
{   // Merge the partial images computed by the ranks so every rank ends up
    // with the full picture; a no-op in the non-MPI build.
#ifdef USE_MPI
    int width, height;
    pixmap_get_size(pixmap, &width, &height);
    // Bitwise OR over the raw pixel bytes: each pixel is written by exactly
    // one rank, so OR-merging works provided non-local pixels are
    // zero-filled (NOTE(review): relies on pixmap_create zero-initializing
    // the buffer -- confirm).
    MPI_Allreduce(MPI_IN_PLACE, pixmap->data, width * height * SIZEOF_PIXEL_T,
		  MPI_BYTE, MPI_BOR, MPI_COMM_WORLD);
#endif
    return;
}
//----------------------------------------------------------------------
int mandelbrot(int iter_max, double p_r, double p_i)
{   // Scalar escape-time kernel: iterate z -> z^2 + p starting from z = p
    // and return the iteration count at which |z|^2 first reaches 4.0,
    // or iter_max if the orbit stays bounded that long.
    double re = p_r, im = p_i;
    double cross = 2.0 * re * im;   // carries 2*re*im across the squaring
    int count = 1;

    while (count < iter_max) {
	re *= re;                   // re now holds re^2
	im *= im;                   // im now holds im^2
	if (re + im >= 4.0)
	    break;                  // orbit escaped
	re = re - im + p_r;         // next re = re^2 - im^2 + p_r
	im = cross + p_i;           // next im = 2*re*im + p_i
	cross = 2.0 * re * im;
	count++;
    }
    return count;
}
//----------------------------------------------------------------------
bool detect_edge(pixmap_t *pixmap, pixel_t *pixel, int x, int y)
{   // Store (x,y)'s color in *pixel and report whether any of its up-to-8
    // in-bounds neighbors differs perceptibly from it.
    int w, h;

    pixmap_get_size (pixmap, &w, &h);
    pixmap_get_pixel(pixmap, pixel, x, y);

    const int jmin = MAX(0, y - 1), jmax = MIN(h - 1, y + 1);
    const int imin = MAX(0, x - 1), imax = MIN(w - 1, x + 1);

    for (int j = jmin; j <= jmax; j++)
	for (int i = imin; i <= imax; i++) {
	    if (i == x && j == y)
		continue;           // skip the center pixel itself
	    pixel_t neighbor;
	    pixmap_get_pixel(pixmap, &neighbor, i, j);
	    if (!equivalent_color(*pixel, neighbor))
		return true;        // found a contrasting neighbor
	}
    return false;
}
//----------------------------------------------------------------------
bool equivalent_color(pixel_t p, pixel_t q)
#ifdef USE_SAME_COLOR
{   // strict mode: colors must match channel-for-channel
    return pixel_get_r(p) == pixel_get_r(q) &&
	   pixel_get_g(p) == pixel_get_g(q) &&
	   pixel_get_b(p) == pixel_get_b(q);
}
#else //......................................
{   // perceptual mode: compare two luma-weighted differences (Rec.601 and
    // Rec.709 coefficients) and one chroma-like axis against fixed
    // thresholds
    const int dr = pixel_get_r(p) - pixel_get_r(q);
    const int dg = pixel_get_g(p) - pixel_get_g(q);
    const int db = pixel_get_b(p) - pixel_get_b(q);
    const double luma601 = 0.299 * dr + 0.587 * dg + 0.114 * db;
    const double luma709 = 0.213 * dr + 0.715 * dg + 0.072 * db;
    const double chroma  = 0.596 * dr - 0.274 * dg - 0.322 * db;
    return (fabs(luma601) < 1.5 || fabs(luma709) < 1.5) &&
	   fabs(chroma) < 4.2;
}
#endif
|
genome.c | /* =============================================================================
*
* genome.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "gene.h"
#include "random.h"
#include "segments.h"
#include "sequencer.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
#include "vector.h"
enum param_types {
PARAM_GENE = (unsigned char)'g',
PARAM_NUMBER = (unsigned char)'n',
PARAM_SEGMENT = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
#define PARAM_DEFAULT_GENE (1L << 14)
#define PARAM_DEFAULT_NUMBER (1L << 22)
#define PARAM_DEFAULT_SEGMENT (1L << 6)
#define PARAM_DEFAULT_THREAD (1L)
long global_params[256]; /* 256 = ascii limit */
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
    /* Print the option summary (with compile-time defaults) and terminate
     * with a nonzero status; called whenever argument parsing fails. */
    printf("Usage: %s [options]\n", appName);
    puts("\nOptions: (defaults)\n");
    printf(" g <UINT> Length of [g]ene (%li)\n", PARAM_DEFAULT_GENE);
    printf(" n <UINT> Min [n]umber of segments (%li)\n", PARAM_DEFAULT_NUMBER);
    printf(" s <UINT> Length of [s]egment (%li)\n", PARAM_DEFAULT_SEGMENT);
    printf(" t <UINT> Number of [t]hreads (%li)\n", PARAM_DEFAULT_THREAD);
    puts("");
    puts("The actual number of segments created may be greater than -n");
    puts("in order to completely cover the gene.");
    exit(1);
}
/* =============================================================================
* setDefaultParams
* =============================================================================
*/
static void
setDefaultParams( void )
{
    /* Reset every tunable (indexed by its option character) to its
     * compile-time default. */
    global_params[PARAM_THREAD]  = PARAM_DEFAULT_THREAD;
    global_params[PARAM_SEGMENT] = PARAM_DEFAULT_SEGMENT;
    global_params[PARAM_NUMBER]  = PARAM_DEFAULT_NUMBER;
    global_params[PARAM_GENE]    = PARAM_DEFAULT_GENE;
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
static void
parseArgs (long argc, char* const argv[])
{
    /* Fill global_params from the command line; any unknown option or
     * leftover non-option argument triggers the usage text (which exits). */
    long i;
    long opt;

    opterr = 0;

    setDefaultParams();

    while ((opt = getopt(argc, argv, "g:n:s:t:")) != -1) {
        if (opt == 'g' || opt == 'n' || opt == 's' || opt == 't') {
            /* option character doubles as the table index */
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            opterr++;   /* '?' or anything unexpected */
        }
    }

    for (i = optind; i < argc; i++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[i]);
        opterr++;
    }

    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* main
* =============================================================================
*/
MAIN (argc,argv)
{
    /* Benchmark driver: create a random gene, shred it into overlapping
     * segments, then time the parallel sequencer that reassembles it.
     * MAIN / GOTO_* / TM_* / SIM_* are STAMP TM-harness macros; their
     * expansion depends on the build (presumably HTM/STM/simulator --
     * see tm.h). */
    TIMER_T start;
    TIMER_T stop;

    /* run setup and reporting outside the simulated/instrumented region */
    GOTO_REAL();

    /* Initialization */
    parseArgs(argc, (char** const)argv);
    SIM_GET_NUM_CPU(global_params[PARAM_THREAD]);

    printf("Creating gene and segments... ");
    fflush(stdout);

    long geneLength = global_params[PARAM_GENE];
    long segmentLength = global_params[PARAM_SEGMENT];
    long minNumSegment = global_params[PARAM_NUMBER];
    long numThread = global_params[PARAM_THREAD];

    TM_STARTUP(numThread);
    P_MEMORY_STARTUP(numThread);
    thread_startup(numThread);

    /* fixed seed: runs are reproducible */
    random_t* randomPtr = random_alloc();
    assert(randomPtr != NULL);
    random_seed(randomPtr, 0);

    gene_t* genePtr = gene_alloc(geneLength);
    assert( genePtr != NULL);
    gene_create(genePtr, randomPtr);
    char* gene = genePtr->contents;

    segments_t* segmentsPtr = segments_alloc(segmentLength, minNumSegment);
    assert(segmentsPtr != NULL);
    segments_create(segmentsPtr, genePtr, randomPtr);

    sequencer_t* sequencerPtr = sequencer_alloc(geneLength, segmentLength, segmentsPtr);
    assert(sequencerPtr != NULL);

    puts("done.");
    printf("Gene length = %li\n", genePtr->length);
    printf("Segment length = %li\n", segmentsPtr->length);
    printf("Number segments = %li\n", vector_getSize(segmentsPtr->contentsPtr));
    fflush(stdout);

    /* Benchmark */
    printf("Sequencing gene... ");
    fflush(stdout);
    TIMER_READ(start);
    GOTO_SIM();
#ifdef OTM
    /* OpenMP-transactions build: each thread runs the sequencer directly */
#pragma omp parallel
    {
        sequencer_run(sequencerPtr);
    }
#else
    thread_start(sequencer_run, (void*)sequencerPtr);
#endif
    GOTO_REAL();
    TIMER_READ(stop);
    puts("done.");
    printf("\nTime = %lf\n", TIMER_DIFF_SECONDS(start, stop));
    fflush(stdout);

    /* Check result */
    {
        char* sequence = sequencerPtr->sequence;
        int result = strcmp(gene, sequence);
        printf("Sequence matches gene: %s\n", (result ? "no" : "yes"));
        if (result) {
            printf("gene = %s\n", gene);
            printf("sequence = %s\n", sequence);
        }
        fflush(stdout);
        /* reassembly may overshoot but must contain at least the gene */
        assert(strlen(sequence) >= strlen(gene));
    }

    /* Clean up */
    printf("Deallocating memory... ");
    fflush(stdout);
    sequencer_free(sequencerPtr);
    segments_free(segmentsPtr);
    gene_free(genePtr);
    random_free(randomPtr);
    puts("done.");
    fflush(stdout);

    TM_SHUTDOWN();
    P_MEMORY_SHUTDOWN();

    GOTO_SIM();
    thread_shutdown();
    MAIN_RETURN(0);
}
/* =============================================================================
*
* End of genome.c
*
* =============================================================================
*/
|
StateRewardData.h | //
// CubismUP_3D
//
// Written by Guido Novati ( novatig@ethz.ch ).
// Copyright (c) 2017 ETHZ. All rights reserved.
//
#ifndef CubismUP_3D_StateRewardData_h
#define CubismUP_3D_StateRewardData_h
//#include <cassert>
//#include <assert.h>
// utmost import to be defined before including cubism
static const int NpLatLine = 10;
//#define __ExploreHalfWake
#ifdef __RL_MPI_CLIENT //hardcoded BC for DCyl
#define checkTerm(...) checkTerm_DcylFollower(__VA_ARGS__)
#define sendInitC(...) sendInitC_DcylFollower(__VA_ARGS__)
#define setRefFrm() setRefFrm_DCylFollower()
//TODO:
// - 2/N fish want open bc in z
// - cleaning: maybe compile cubism and set flags based on user's app choice
#endif
#include "../Definitions.h"
#include "../ObstacleBlock.h"
CubismUP_3D_NAMESPACE_BEGIN
// Per-agent RL state/reward bookkeeping: characteristic scales, running
// time-averages of kinematic and power quantities, and episode-termination
// logic for the fish-following tasks.
struct StateReward
{
  // Characteristic length/time of the agent; the derived scales below are
  // recomputed whenever the scales change (see operator=).
  double lengthscale, timescale;
  double velscale = lengthscale/timescale;
  double forcescale = velscale*velscale*lengthscale*lengthscale; //l^4/t^2
  double powerscale = forcescale*velscale; //rho*l^3 * l/t^2 * l/t
  // latched when the episode must be terminated (collision, out of bounds, ...)
  bool bRestart = false;
  bool bForgiving=0, bLoadedActions=0, bInteractive=0, randomStart=0;
  //bool randomActions, bSpiral;
  int info=1, stepId=0;//, nActions=2;
  double t_next_comm=0, Tstartlearn=1e9, GoalDX=0, new_curv=0, old_curv=0, new_Tp=0;
  //exponential averages
  double thExp = 0, vxExp = 0, vyExp = 0, avExp = 0;
  //average quantities (time-weighted; avg_wght is the accumulated weight)
  double avg_wght = 0;
  double ThetaAvg = 0, ThetaVel = 0, VxAvg = 0, VyAvg = 0, AvAvg = 0;
  double PoutBnd = 0, Pout = 0, defPowerBnd = 0, defPower = 0, ToD = 0;
  double EffPDefBnd = 0, EffPDef = 0, Pthrust = 0, Pdrag = 0;

  // Zero every time-averaged quantity and its accumulated weight so a new
  // averaging window can start.
  void resetAverage()
  {
    avg_wght = ThetaAvg = ThetaVel = VxAvg = VyAvg = AvAvg = Pthrust = ToD = 0;
    PoutBnd = Pout = defPowerBnd = defPower = Pdrag = EffPDefBnd = EffPDef = 0;
  }
  // Fold one time step into every running time-average:
  //   avg <- (avg*avg_wght + sample*_dt) / (avg_wght + _dt)
  // and into the exponential averages and battery.
  // _pO1/_pO2: output power (plain/bounded); _pW1/_pW2: deformation power
  // (plain/bounded) -- inferred from the members they update; _eff1/_eff2:
  // efficiencies; _pT/_pD: thrust/drag power; _T/_D: thrust/drag.
  void updateAverages(const double _dt,
    const double _th, const double _vx, const double _vy, const double _av,
    const double _pO1, const double _pO2, const double _pW1, const double _pW2,
    const double _eff1, const double _eff2, const double _pT, const double _pD,
    const double _T, const double _D)
  {
    if(_dt<=0) return; // nothing to accumulate
    // ToD (thrust-over-drag) guarded against near-zero drag; _vt is the
    // instantaneous direction of travel
    const double _ToD=_D<1e-9?0:_T/_D, _W=1/(avg_wght+_dt), _vt=atan2(_vy,_vx);
    VxAvg = ( VxAvg * avg_wght + _vx * _dt ) * _W;
    VyAvg = ( VyAvg * avg_wght + _vy * _dt ) * _W;
    AvAvg = ( AvAvg * avg_wght + _av * _dt ) * _W;
    ThetaAvg = ( ThetaAvg * avg_wght + _th * _dt ) * _W;
    ThetaVel = ( ThetaVel * avg_wght + _vt * _dt ) * _W;
    Pout = ( Pout * avg_wght + _pO1 * _dt ) * _W;
    PoutBnd = ( PoutBnd * avg_wght + _pO2 * _dt ) * _W;
    defPower = ( defPower * avg_wght + _pW1 * _dt ) * _W;
    defPowerBnd = ( defPowerBnd * avg_wght + _pW2 * _dt ) * _W;
    EffPDef = ( EffPDef * avg_wght + _eff1 * _dt ) * _W;
    EffPDefBnd = ( EffPDefBnd * avg_wght + _eff2 * _dt ) * _W;
    Pthrust = ( Pthrust * avg_wght + _pT * _dt ) * _W;
    Pdrag = ( Pdrag * avg_wght + _pD * _dt ) * _W;
    ToD = ( ToD * avg_wght + _ToD * _dt ) * _W;
    avg_wght += _dt;
    // battery integrates the bounded deformation power over time
    // (NOTE(review): sign convention not visible here -- presumably
    // defPowerBnd <= 0 so the battery depletes from 1; confirm)
    battery += _dt * defPowerBnd;
    // first-order exponential smoothing with weight _dt
    // (NOTE(review): only sensible for _dt < 1 -- confirm step sizes)
    thExp = (1-_dt) * thExp + _dt * _th;
    vxExp = (1-_dt) * vxExp + _dt * _vx;
    vyExp = (1-_dt) * vyExp + _dt * _vy;
    avExp = (1-_dt) * avExp + _dt * _av;
  }
  // instantaneous quantities
  double Xrel = 0, Xabs = 0, Xpov = 0, Yrel = 0, Yabs = 0, Ypov = 0, Theta = 0;
  double VxInst = 0, VyInst = 0, AvInst = 0, VX = 0, VY = 0, AV = 0;
  double phaseShift = 0, Dist = 0, Quad = 0, RelAng = 0;
  // battery starts full (1); ext_* are domain extents, negative = unknown
  double battery = 1, ext_X = -1, ext_Y = -1, ext_Z = -1;

  // Record the instantaneous pose/velocity and latch bRestart if the agent
  // is too close to a domain boundary.
  // NOTE(review): these bounds are absolute numbers while
  // checkTerm_bounds() scales the same thresholds by lengthscale --
  // confirm which convention is intended.
  void updateInstant(
    const double _xR, const double _xA, const double _yR, const double _yA,
    const double _th, const double _vx, const double _vy, const double _av)
  {
    Xrel = _xR; Xabs = _xA; Yrel = _yR; Yabs = _yA; Theta= _th;
    VxInst=_vx; VyInst=_vy; AvInst=_av;
    if (Xrel<0.05 || Yrel<0.025) bRestart = true;
    if (ext_X>0 && ext_X-Xrel<0.2) bRestart = true;
    if (ext_Y>0 && ext_Y-Yrel<.025) bRestart = true;
  }
  // sensors: surface pressure/viscous force samples (FP*/FV*) and their
  // sample-point coordinates (PX*/PY*) above and below the body, plus the
  // forward ray-casting distances
  vector<double> FPAbove, FVAbove, FPBelow, FVBelow;
  vector<double> PXAbove, PYAbove, PXBelow, PYBelow;
  vector<double> raySight;
  // pre-recorded action sequence (filled by readLoadedActions)
  vector<vector<double>> loadedActions;

  // Construct with the agent's characteristic scales and zero-sized sensor
  // readings (NpLatLine points per lateral line, 2*NpLatLine rays).
  StateReward(const double _lengthscale = 1, const double _timescale = 1) :
    lengthscale(_lengthscale), timescale(_timescale)
  {
    //printf("scales: %f %f %f %f %f",
    //  lengthscale,timescale,velscale,forcescale,powerscale);
    FPAbove.resize(NpLatLine,0); FVAbove.resize(NpLatLine,0);
    FPBelow.resize(NpLatLine,0); FVBelow.resize(NpLatLine,0);
    PXAbove.resize(NpLatLine,0); PYAbove.resize(NpLatLine,0);
    PXBelow.resize(NpLatLine,0); PYBelow.resize(NpLatLine,0);
    raySight.resize(2*NpLatLine,0);
  }
  // Copy ONLY the characteristic scales (recomputing the derived ones) and
  // re-size the sensor arrays; all dynamic state (averages, positions,
  // battery, loaded actions) is left untouched.
  // NOTE(review): presumably intentional -- used to re-scale a fresh agent
  // rather than clone full state -- but confirm callers expect this.
  StateReward& operator= (const StateReward& s)
  {
    lengthscale = s.lengthscale;
    timescale = s.timescale;
    velscale = lengthscale/timescale;
    forcescale = velscale*velscale*lengthscale*lengthscale; //l^4/t^2
    powerscale = forcescale*velscale; //rho*l^3 * l/t^2 * l/t
    #ifdef __RL_TRAINING
    printf("scales: %f %f %f %f %f",
      lengthscale,timescale,velscale,forcescale,powerscale);
    #endif
    FPAbove.resize(NpLatLine,0); FVAbove.resize(NpLatLine,0);
    FPBelow.resize(NpLatLine,0); FVBelow.resize(NpLatLine,0);
    PXAbove.resize(NpLatLine,0); PYAbove.resize(NpLatLine,0);
    PXBelow.resize(NpLatLine,0); PYBelow.resize(NpLatLine,0);
    raySight.resize(2*NpLatLine,0);
    return *this;
  }
  // Read the RL-related runtime options. Defaults give a non-interactive
  // run with learning effectively disabled (Tstartlearn = 1e9).
  void parseArguments(ArgumentParser & parser)
  {
    bInteractive = parser("-interactive").asBool(false);
    // interactive runs start learning after one timescale, else (much) later
    Tstartlearn = parser("-Tstartlearn").asDouble(bInteractive ? timescale : 1e9);
    GoalDX = parser("-GoalDX").asDouble(0);
    //nActions = parser("-nActions").asInt(2);
    bForgiving = parser("-easyFailBox").asBool(false);
    randomStart = parser("-randomStart").asBool(false);
    bLoadedActions = parser("-useLoadedActions").asBool(false);
    //hardcoded to compute avg state components for halfT b4 first comm... iffy
    t_next_comm = Tstartlearn;// - timescale/2;
    if (bLoadedActions) readLoadedActions();
    printf("scales: %f %f %f %f %f, %d, %f, %f, %d, %d, %d\n",
      lengthscale,timescale,velscale,forcescale,powerscale, bInteractive, Tstartlearn, GoalDX, bForgiving, randomStart, bLoadedActions);
  }
  // Pop the next pre-recorded action (readLoadedActions stores them so the
  // earliest is at the back). Returns an empty vector once exhausted.
  // NOTE(review): the guard is size()>1, so the final stored entry is
  // never returned -- possibly deliberate (keep a sentinel/last entry),
  // but confirm; size()>=1 would consume every action.
  vector<double> useLoadedActions()
  {
    if (loadedActions.size()>1) {
      vector<double> actions = loadedActions.back();
      loadedActions.pop_back();
      return actions;
    } //else zero actions
    else return vector<double>();
  }
void readLoadedActions(const int nActions = 2)
{
double dummy_time;
vector<double> action(nActions);
ifstream in("orders_1.txt");
std::string line;
if(in.good()) {
while (getline(in, line)) {
std::istringstream line_in(line);
if(nActions==2) line_in >> dummy_time >> action[0] >> action[1];
else line_in >> dummy_time >> action[0];
//i want to do pop back later:
loadedActions.insert(loadedActions.begin(),action);
}
in.close();
}
}
  // Record the current simulation step index.
  void updateStepId(const int _stepId) {stepId=_stepId;}

  // Convert raw world-frame state into the observation frame given by pose
  // (xFOR, yFOR, thFOR) and velocity (vxFOR, vyFOR, avFOR): fills the
  // relative velocities VX/VY/AV, rotates the averaged velocities into the
  // fish frame, and computes relative position, angle, distance and bearing.
  void finalize(const double xFOR, const double yFOR, const double thFOR,
    const double vxFOR, const double vyFOR, const double avFOR)
  {
    //velocity of reference from fish pov
    VX = (VxInst-vxFOR)*std::cos(Theta) + (VyInst-vyFOR)*std::sin(Theta);
    VY = (VyInst-vyFOR)*std::cos(Theta) - (VxInst-vxFOR)*std::sin(Theta);
    AV = (AvInst-avFOR);
    //velocity of fish in reference pov
    const double vxAvg = VxAvg, vyAvg = VyAvg;
    VxAvg = vxAvg*std::cos(Theta) + vyAvg*std::sin(Theta);
    VyAvg = vyAvg*std::cos(Theta) - vxAvg*std::sin(Theta);
    AvAvg = AvAvg; // self-assignment: no-op, kept for symmetry
    //position in reference frame
    Xpov = (Xrel-xFOR)*std::cos(thFOR) + (Yrel-yFOR)*std::sin(thFOR);
    Ypov = (Yrel-yFOR)*std::cos(thFOR) - (Xrel-xFOR)*std::sin(thFOR);
    RelAng = Theta - thFOR;
    // reference-frame origin as seen from the fish: used for range/bearing
    const double Xframe=(xFOR-Xrel)*std::cos(Theta)+(yFOR-Yrel)*std::sin(Theta);
    const double Yframe=(yFOR-Yrel)*std::cos(Theta)-(xFOR-Xrel)*std::sin(Theta);
    Dist = std::sqrt(std::pow(Xrel-xFOR,2) + std::pow(Yrel-yFOR,2));
    Quad = std::atan2(Yframe, Xframe);
  }
  // Episode-termination test for a follower swimming behind a leader fish.
  // First applies the absolute bound check; in interactive mode it then
  // enforces a wake-following box expressed in the leader's frame
  // (xFOR, yFOR, thFOR). Returns (and latches) bRestart.
  // vxFOR/vyFOR/avFOR are accepted for interface symmetry but unused.
  bool checkTerm_LeadFollower(const double xFOR, const double yFOR,
    const double thFOR,const double vxFOR,const double vyFOR,const double avFOR)
  {
    checkTerm_bounds(xFOR, yFOR);
    if(not bInteractive or bRestart) return bRestart;
    // leader-frame coordinates of the follower
    const double _Xrel = (Xrel-xFOR)*cos(thFOR) + (Yrel-yFOR)*sin(thFOR);
    const double _Yrel = (Yrel-yFOR)*cos(thFOR) - (Xrel-xFOR)*sin(thFOR);
    const double _thRel= Theta - thFOR;
    const double _Dist = sqrt(pow(Xrel-xFOR,2) + pow(Yrel-yFOR,2));
    bRestart = _Dist < .25*lengthscale;
    if(bRestart) {printf("Too close\n"); return bRestart;}
    //at DX=1, allowed DY=.5, at DX=2.5 allowed DY=.75
    bRestart = fabs(_Yrel)>(bForgiving?lengthscale: _Xrel/6 + 7*lengthscale/12);
    if(bRestart) {printf("Too much vertical distance\n"); return bRestart;}
    #ifdef __ExploreHalfWake
    bRestart = _Yrel < -.1*lengthscale;
    if(bRestart) {printf("Wrong half of the wake\n"); return bRestart;}
    #endif
    bRestart = std::fabs(_thRel)> (bForgiving ? M_PI : M_PI/2);
    if(bRestart) {printf("Too different inclination\n"); return bRestart;}
    // follower must stay between 1 and 2.5 body lengths behind the leader
    bRestart = _Xrel < lengthscale || _Xrel > 2.5*lengthscale;
    if(bRestart) {printf("Too far from horizontal goal\n"); return bRestart;}
    return bRestart;
  }
  // Episode-termination test for a follower behind a D-cylinder: fails if
  // any surface sensor point touches the obstacle (x < xFOR) or leaves the
  // domain, or if the fish strays too far vertically / inclines too much.
  // vxFOR/vyFOR/avFOR are accepted for interface symmetry but unused.
  bool checkTerm_DcylFollower(const double xFOR,const double yFOR,
    const double thFOR,const double vxFOR,const double vyFOR,const double avFOR)
  {
    if (bRestart) printf("Already ended\n");
    if(not bInteractive or bRestart) return bRestart;
    // sensor points must stay right of the obstacle, left of x=0.8 and
    // inside [0, ext_Y] vertically
    for(int i=0; i<NpLatLine; i++) {
      if(PXAbove[i]< xFOR||PXBelow[i]< xFOR) {printf("Touching\n"); bRestart=1;}
      if(PXAbove[i]> 0.8||PXBelow[i]> 0.8) {printf("Boundary\n"); bRestart=1;}
      if(PYAbove[i]< 0||PYBelow[i]< 0) {printf("Boundary\n"); bRestart=1;}
      if(PYAbove[i]>ext_Y||PYBelow[i]>ext_Y) {printf("Boundary\n"); bRestart=1;}
      if(bRestart) return bRestart;
    }
    // obstacle-frame coordinates of the fish
    const double _Xrel = (Xrel-xFOR)*cos(thFOR) + (Yrel-yFOR)*sin(thFOR);
    const double _Yrel = (Yrel-yFOR)*cos(thFOR) - (Xrel-xFOR)*sin(thFOR);
    const double _Dist = sqrt(pow(Xrel-xFOR,2) + pow(Yrel-yFOR,2));
    (void)_Xrel; // To stop complaining about unused variables.
    (void)_Yrel; // NOTE(review): _Yrel IS used just below; only _Xrel and
    (void)_Dist; // _Dist are actually unused.
    bRestart = std::fabs(_Yrel) > 2*lengthscale;
    if(bRestart) {printf("Too much vertical distance\n"); return bRestart;}
    bRestart = std::fabs(Theta)>M_PI;
    if(bRestart) {printf("Too different inclination\n"); return bRestart;}
    return bRestart;
  }
// Flag a restart whenever the swimmer drifts too close to a domain edge.
// Note: bRestart is only ever raised here, never cleared, so a restart
// requested earlier still prints "Out of bounds". xFOR/yFOR are unused.
bool checkTerm_bounds(const double xFOR, const double yFOR)
{
  const bool nearLowEdges  = Xrel < .05*lengthscale || Yrel < .025*lengthscale;
  const bool nearRightEdge = ext_X > 0 && ext_X - Xrel < .2 *lengthscale;
  const bool nearTopEdge   = ext_Y > 0 && ext_Y - Yrel < .025*lengthscale;
  if (nearLowEdges || nearRightEdge || nearTopEdge) bRestart = true;
  if (bRestart) printf("Out of bounds\n");
  return bRestart;
}
// Per-skin-point force accumulator, reduced over MPI ranks.
// `data` holds 5 contiguous segments of length nDest:
//   [0]=fxP, [1]=fyP (pressure force), [2]=fxV, [3]=fyV (viscous force),
//   [4]=number of grid-point contributions.
struct skinForcesVels
{
  // Allocates and zeroes storage for nDest skin points.
  skinForcesVels(const int _nDest) : nDest(_nDest), data(_alloc(5*_nDest))
  {
    memset(data, 0, sizeof(double)*5*nDest);
  }
  virtual ~skinForcesVels() { _dealloc(data); }
  // Owns a raw buffer: copying would double-free it, so forbid
  // copy and move (rule of five).
  skinForcesVels(const skinForcesVels&) = delete;
  skinForcesVels& operator=(const skinForcesVels&) = delete;
  skinForcesVels(skinForcesVels&&) = delete;
  skinForcesVels& operator=(skinForcesVels&&) = delete;
  // Accumulate one grid point's force contributions onto skin point i.
  inline void storeNearest(const double fxP, const double fyP, const double fxV, const double fyV, const int i)
  {
    data[i+0*nDest] += fxP; data[i+1*nDest] += fyP;
    data[i+2*nDest] += fxV; data[i+3*nDest] += fyV;
    data[i+4*nDest] += 1.;
  }
  inline double fxP(const int i) { return data[i+0*nDest]; }
  inline double fyP(const int i) { return data[i+1*nDest]; }
  inline double fxV(const int i) { return data[i+2*nDest]; }
  inline double fyV(const int i) { return data[i+3*nDest]; }
  // Sum the accumulators over all ranks, in place.
  void synchronize(const MPI_Comm comm)
  {
    #ifndef CUP_SINGLE_PRECISION
    MPI_Allreduce(MPI_IN_PLACE, data, 5*nDest, MPI_DOUBLE, MPI_SUM, comm);
    #else //CUP_SINGLE_PRECISION
    MPI_Allreduce(MPI_IN_PLACE, data, 5*nDest, MPI_FLOAT, MPI_SUM, comm);
    #endif//
  }
  // Rank 0 dumps the per-point forces to midplaneData_<step>.txt.
  void print(const MPI_Comm comm, const int stepNumber)
  {
    int rank;
    MPI_Comm_rank(comm, &rank);
    if(rank) return;
    ofstream fout;
    char buf[500];
    snprintf(buf, sizeof(buf), "midplaneData_%07d.txt", stepNumber); // bounded, unlike sprintf
    string filename(buf);
    fout.open(filename, ios::trunc);
    for(int i=0; i<nDest; ++i)
      fout<<fxP(i)<<"\t"<<fyP(i)<<"\t"<<fxV(i)<<"\t"<<fyV(i)<<"\t"<<std::endl;
    fout.close();
  }
private:
  const int nDest;
  double*const data;
  double * _alloc(const int N) { return new double[N]; }
  // delete[] on nullptr is a no-op, so no guard is needed. The old
  // `ptr = nullptr` after delete only reset the local copy of the pointer.
  void _dealloc(double * ptr) { delete [] ptr; }
};
typedef const Real*const constAry;
// For every skin point (upper and lower body side), gather the pressure and
// viscous force contributions of all obstacle grid points within one cell
// size h, MPI-reduce them, then condense them into NpLatLine sensor values
// per side: pressure force projected on the local normal and viscous force
// projected on the local tangent, stored in PX/PY/FP/FV Above/Below.
void nearestGridPoints(
const std::map<int,ObstacleBlock*>& obstacleBlocks,
const vector<BlockInfo>& vInfo, const int Nskin,
constAry xU, constAry yU, constAry xL, constAry yL,
constAry nxU, constAry nyU, constAry nxL, constAry nyL,
const double zObst, const double h, const MPI_Comm comm)
{
constexpr int BS = FluidBlock::BS;
// slots [0, Nskin) = upper skin, [Nskin, 2*Nskin) = lower skin
skinForcesVels data(Nskin*2);
const double eps = 10*std::numeric_limits<double>::epsilon();
const unsigned NB = vInfo.size();
// each iteration writes only to its own slot j, so no race on `data`
#pragma omp parallel for schedule(dynamic)
for (int j=0; j<2*Nskin; j++)
{
const double X = j>=Nskin ? xL[j-Nskin] : xU[j];
const double Y = j>=Nskin ? yL[j-Nskin] : yU[j];
for(unsigned i=0; i<NB; i++) {
const BlockInfo I = vInfo[i];
const auto pos = obstacleBlocks.find(I.blockID);
if(pos == obstacleBlocks.end()) continue;
if(pos->second->nPoints == 0) continue;
const auto& o = pos->second;
assert(o->filled);
// cheap reject: skip blocks whose bounding box is farther than h
double max_pos[3], min_pos[3];
I.pos(min_pos, 0, 0, 0);
I.pos(max_pos, BS-1, BS-1, BS-1);
if(zObst-max_pos[2]>h+eps || min_pos[2]-zObst>h+eps) continue;
if(Y -max_pos[1]>h+eps || min_pos[1]-Y >h+eps) continue;
if(X -max_pos[0]>h+eps || min_pos[0]-X >h+eps) continue;
// accumulate every obstacle grid point within h of this skin point
for(int k=0; k<pos->second->nPoints; k++) {
if(std::fabs(o->pZ[k]-zObst)>h+eps) continue;
if(std::fabs(o->pY[k]-Y) >h+eps) continue;
if(std::fabs(o->pX[k]-X) >h+eps) continue;
//printf("%f %f %f %f\n",o->fxP[k],o->fyP[k],o->fxV[k],o->fyV[k]);
data.storeNearest(o->fxP[k], o->fyP[k], o->fxV[k], o->fyV[k], j);
}
}
}
// sum contributions from all ranks (the skin may span several subdomains)
data.synchronize(comm);
//data.print(comm,stepId);
/*
int rank;
MPI_Comm_rank(comm, &rank);
if(!rank) {
ofstream fileskin;
char buf[500];
sprintf(buf, "skinPoints_%07d.txt", stepId);
string filename(buf);
fileskin.open(filename, ios::trunc);
for (int j=0; j<Nskin; j++)
fileskin<<xU[j]<<"\t"<<yU[j]<<std::endl;
for (int j=Nskin-1; j>=0; j--)
fileskin<<xL[j]<<"\t"<<yL[j]<<std::endl;
fileskin.close();
}
*/
vector<double> NxAbove(NpLatLine,0), NyAbove(NpLatLine,0);
vector<double> NxBelow(NpLatLine,0), NyBelow(NpLatLine,0);
//now, feed the sensors: bin the Nskin points into NpLatLine sensors per side
for (int k=0; k<NpLatLine; k++)
{
const int first = k *(double)Nskin/(double)NpLatLine;
const int last = (k+1)*(double)Nskin/(double)NpLatLine;
double FPxAbove=0, FPxBelow=0, FPyAbove=0, FPyBelow=0;
double FVxAbove=0, FVxBelow=0, FVyAbove=0, FVyBelow=0;
// sum pressure/viscous forces over the skin points of this bin
for (int j=first; j<last; j++) {
FPxAbove += data.fxP(j); FPxBelow += data.fxP(j+Nskin);
FPyAbove += data.fyP(j); FPyBelow += data.fyP(j+Nskin);
FVxAbove += data.fxV(j); FVxBelow += data.fxV(j+Nskin);
FVyAbove += data.fyV(j); FVyBelow += data.fyV(j+Nskin);
}
// sensor location/orientation: skin point in the middle of the bin
const int mid = 0.5*(first+last);
PXAbove[k] = xU[mid]; PYAbove[k] = yU[mid];
PXBelow[k] = xL[mid]; PYBelow[k] = yL[mid];
const double nxAbove = nxU[mid]; // ^ ^
const double nyAbove = nyU[mid]; // ` /
const double txAbove = nyU[mid]; // n ` / t
const double tyAbove =-nxU[mid]; // `
NxAbove[k] = nxAbove;
NyAbove[k] = nyAbove;
const double nxBelow = nxL[mid]; // /`
const double nyBelow = nyL[mid]; // n / ` t
const double txBelow =-nyL[mid]; // / `
const double tyBelow = nxL[mid]; // v v
NxBelow[k] = nxBelow;
NyBelow[k] = nyBelow;
// project pressure force on the normal, viscous force on the tangent
FPAbove[k] = FPxAbove*nxAbove + FPyAbove*nyAbove;
FVAbove[k] = FVxAbove*txAbove + FVyAbove*tyAbove;
FPBelow[k] = FPxBelow*nxBelow + FPyBelow*nyBelow;
FVBelow[k] = FVxBelow*txBelow + FVyBelow*tyBelow;
}
// debug dump of the sensor distribution, disabled
if(0){
ofstream fileskin;
char buf[500];
sprintf(buf, "sensorDistrib_%07d.txt", stepId);
string filename(buf);
fileskin.open(filename, ios::trunc);
// int k=0;
for(int i=0; i<NpLatLine; ++i)
fileskin<<PXAbove[i]<<"\t"<<PYAbove[i]<<"\t"<<NxAbove[i]
<<"\t"<<NyAbove[i]<<"\t"<<FPAbove[i]<<"\t"<<FVAbove[i]
//<<"\t"<<raySight[k++]
<<std::endl;
for(int i=0; i<NpLatLine; ++i)
fileskin<<PXBelow[i]<<"\t"<<PYBelow[i]<<"\t"<<NxBelow[i]
<<"\t"<<NyBelow[i]<<"\t"<<FPBelow[i]<<"\t"<<FVBelow[i]
//<<"\t"<<raySight[k++]
<<std::endl;
fileskin.close();
}
}
// Serialize the full agent/sensor state to <filename>_save_data.txt.
// Field order and separators must mirror restart() exactly.
void save(const int step_id, string filename)
{
ofstream savestream;
savestream.setf(std::ios::scientific);
// full double precision so a save/restart round trip is lossless
savestream.precision(std::numeric_limits<double>::digits10 + 1);
string fullFileName = filename==string() ? "restart_IF2D_Stefan" : filename;
savestream.open(fullFileName+"_save_data.txt");
savestream << bRestart << "\t" << info << "\t" << avg_wght << "\t" << t_next_comm << "\t"
<< Xrel << "\t" << Xabs << "\t" << Yrel << "\t" << Yabs << "\t"
<< Theta << "\t" << VxAvg << "\t" << VyAvg<< "\t" << AvAvg << "\t"
<< thExp << "\t" << vxExp << "\t" << vyExp<< "\t" << avExp << "\t"
<< VxInst << "\t" << VyInst<< "\t" << AvInst << "\t"
<< Dist << "\t" << Quad << "\t" << RelAng<< "\t"
<< VX << "\t" << VY << "\t" << AV << "\t"
<< ThetaAvg<<"\t"<<ThetaVel<<"\t"<<PoutBnd<<"\t"<<Pout << "\t"
<< defPowerBnd<<"\t"<<defPower<<"\t"<<EffPDefBnd<<"\t"<<EffPDef << "\t"
<< Pthrust << "\t" << Pdrag << "\t" << ToD << std::endl;
// one line per lateral-line sensor: positions then projected forces
for (int i=0; i<NpLatLine; i++) {
savestream <<
PXAbove[i] << "\t" << PYAbove[i] << "\t" <<
PXBelow[i] << "\t" << PYBelow[i] << "\t" <<
FPAbove[i] << "\t" << FVAbove[i] << "\t" <<
FPBelow[i] << "\t" << FVBelow[i] << std::endl;
}
savestream.close();
}
// Restore the agent/sensor state written by save(). Silently returns if the
// restart file is missing; field order must mirror save() exactly.
// Rank 0 echoes the restored values to stdout for inspection.
void restart(string filename)
{
ifstream restartstream;
string fullFileName = filename;
restartstream.open(fullFileName+"_save_data.txt");
if(not restartstream.good()) return;
restartstream >> bRestart >> info >> avg_wght >> t_next_comm >>
Xrel >> Xabs >> Yrel >> Yabs >>
Theta >> VxAvg >> VyAvg >> AvAvg >>
thExp >> vxExp >> vyExp >> avExp >>
VxInst >> VyInst >> AvInst >>
Dist >> Quad >> RelAng >>
VX >> VY >> AV >>
ThetaAvg >> ThetaVel >> PoutBnd >> Pout >>
defPowerBnd >> defPower >> EffPDefBnd>> EffPDef >>
Pthrust >> Pdrag >> ToD;
// per-sensor line: positions then projected forces, as written by save()
for (int i=0; i<NpLatLine; i++) {
restartstream >>
PXAbove[i] >> PYAbove[i] >>
PXBelow[i] >> PYBelow[i] >>
FPAbove[i] >> FVAbove[i] >>
FPBelow[i] >> FVBelow[i];
}
restartstream.close();
int rank;
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if (rank==0)
{
// echo restored state so the log shows what the run resumed from
cout << bRestart << "\t" << info << "\t" << avg_wght << "\t" << t_next_comm << "\t"
<< Xrel << "\t" << Xabs << "\t" << Yrel << "\t" << Yabs << "\t"
<< Theta << "\t" << VxAvg << "\t" << VyAvg<< "\t" << AvAvg << "\t"
<< thExp << "\t" << vxExp << "\t" << vyExp<< "\t" << avExp << "\t"
<< VxInst << "\t" << VyInst<< "\t" << AvInst << "\t"
<< Dist << "\t" << Quad << "\t" << RelAng<< "\t"
<< VX << "\t" << VY << "\t" << AV << "\t"
<< ThetaAvg << "\t" << ThetaVel << "\t" << PoutBnd << "\t" << Pout << "\t"
<< defPowerBnd << "\t" << defPower << "\t" << EffPDefBnd<< "\t" << EffPDef << "\t"
<< Pthrust << "\t" << Pdrag << "\t" << ToD << std::endl;
for (int i=0; i<NpLatLine; i++) {
cout << PXAbove[i] << "\t" << PYAbove[i] << "\t" <<
PXBelow[i] << "\t" << PYBelow[i] << "\t" <<
FPAbove[i] << "\t" << FVAbove[i] << "\t" <<
FPBelow[i] << "\t" << FVBelow[i] << std::endl;
}
}
}
// Diagnostics output: writes the current sensor distribution to
// sensorDistrib_<ID>_<step>.txt (truncated) and appends the averaged
// state quantities to avgSensors_<ID>.txt. Called by every rank.
void print(const int ID, const int stepNumber, const double time)
{
//int rank;
//MPI_Comm_rank(MPI_COMM_WORLD,&rank);
//if (rank) return;
{
// per-step snapshot: sensor positions, projected forces, ray-cast distances
ofstream fileskin;
char buf[500];
sprintf(buf, "sensorDistrib_%1d_%07d.txt", ID, stepNumber);
string filename(buf);
fileskin.open(filename, ios::trunc);
int k=0;
for(int i=0; i<NpLatLine; ++i)
fileskin<<PXAbove[i]<<"\t"<<PYAbove[i]<<"\t"<<FPAbove[i]<<"\t"<<FVAbove[i]<<"\t"<<raySight[k++]<<std::endl;
for(int i=0; i<NpLatLine; ++i)
fileskin<<PXBelow[i]<<"\t"<<PYBelow[i]<<"\t"<<FPBelow[i]<<"\t"<<FVBelow[i]<<"\t"<<raySight[k++]<<std::endl;
fileskin.close();
}
{
// time-series log: one appended row of averaged state per call
ofstream fileskin;
char buf[500];
sprintf(buf, "avgSensors_%1d.txt",ID);
string filename(buf);
fileskin.open(filename, ios::app);
fileskin<< avg_wght << "\t" << t_next_comm << "\t"
<< Xrel << "\t" << Xabs << "\t" << Yrel << "\t" << Yabs << "\t"
<< Theta << "\t" << VxAvg << "\t" << VyAvg<< "\t" << AvAvg << "\t"
<< thExp << "\t" << vxExp << "\t" << vyExp<< "\t" << avExp << "\t"
<< VxInst << "\t" << VyInst<< "\t" << AvInst << "\t"
<< Dist << "\t" << Quad << "\t" << RelAng<< "\t"
<< VX << "\t" << VY << "\t" << AV << "\t"
<< ThetaAvg << "\t" << ThetaVel << "\t" << PoutBnd << "\t" << Pout << "\t"
<< defPowerBnd << "\t" << defPower << "\t" << EffPDefBnd<< "\t" << EffPDef << "\t"
<< Pthrust << "\t" << Pdrag << "\t" << ToD << std::endl;
fileskin.close();
}
}
// Assemble the RL state vector, normalized by the length/velocity/power/force
// scales. The fill order is the state definition seen by the learner and must
// not change. Sensor blocks are appended only if nStateVars leaves room.
vector<double> fillState(const double time, const int nStateVars, const int nActions = 2)
{
if(bRestart) {
if(info==1) printf("Reached termination before first action!!!\n");
info = 2; // mark this observation as terminal
}
vector<double> state(nStateVars, 0);
int k = 0;
//state[k++] = sr.Xpov*invlscale - GoalDX;
state[k++] = Xpov / lengthscale;
state[k++] = Ypov / lengthscale;
state[k++] = RelAng;
state[k++] = std::fmod(time, timescale); //1 is Tperiod of leader
state[k++] = new_curv;
state[k++] = old_curv;
// extra action-related observations only for the two-action setup
if(nActions==2)
{
state[k++] = new_Tp;
state[k++] = phaseShift;
state[k++] = VX / velscale;
state[k++] = VY / velscale;
state[k++] = AV / velscale;
}
state[k++] = Dist / lengthscale;
state[k++] = Quad;
state[k++] = VxAvg / velscale;
state[k++] = VyAvg / velscale;
state[k++] = AvAvg / velscale;
state[k++] = Pout / powerscale;
state[k++] = defPower / powerscale;
state[k++] = EffPDef;
state[k++] = PoutBnd / powerscale;
state[k++] = defPowerBnd / powerscale;
state[k++] = EffPDefBnd;
state[k++] = Pthrust / powerscale;
state[k++] = Pdrag / powerscale;
state[k++] = ToD;
// optionally append the 4*NpLatLine skin-force sensor readings
if(nStateVars>=k+4*NpLatLine) {
for (int j=0; j<NpLatLine; j++) state[k++] = FPAbove[j] / forcescale;
//for (int j=0; j<NpLatLine; j++) printf("FPAbove %d %f\n",j,FPAbove[j]);
for (int j=0; j<NpLatLine; j++) state[k++] = FVAbove[j] / forcescale;
//for (int j=0; j<NpLatLine; j++) printf("FVAbove %d %f\n",j,FVAbove[j]);
for (int j=0; j<NpLatLine; j++) state[k++] = FPBelow[j] / forcescale;
//for (int j=0; j<NpLatLine; j++) printf("FPBelow %d %f\n",j,FPBelow[j]);
for (int j=0; j<NpLatLine; j++) state[k++] = FVBelow[j] / forcescale;
//for (int j=0; j<NpLatLine; j++) printf("FVBelow %d %f\n",j,FVBelow[j]);
}
// ...and the 2*NpLatLine ray-casting distance sensors
if(nStateVars>=k+2*NpLatLine)
for (int j=0;j<2*NpLatLine;j++) state[k++] = raySight[j] / lengthscale;
return state;
}
};
CubismUP_3D_NAMESPACE_END
#endif // CubismUP_3D_StateRewardData_h
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
/*!
 * \brief Grid-stride loop over [0, n): each CUDA thread starts at its global
 * index and advances by the total number of launched threads.
 */
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
      i < (n); \
      i += blockDim.x * gridDim.x)
/*!
 * \brief Query the properties of the currently selected CUDA device.
 */
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}
/*!
 * \brief Get the number of blocks for cuda kernel given N,
 * i.e. ceil(N / kBaseThreadNum) capped at kMaxGridNum.
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}
/*!
 * \brief GPU specialization: total launched threads = blocks * threads-per-block.
 */
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
/*!
 * \brief CPU specialization: recommended OMP thread count.
 * \param N number of iterations (unused by the CPU heuristic)
 */
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}
/*! \brief operator request type switch: binds a compile-time OpReqType
 * constant `ReqType` for the body. kWriteInplace is treated as kWriteTo;
 * kNullOp (and unknown values) execute nothing. */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
  switch (req) { \
  case kNullOp: \
    break; \
  case kWriteInplace: \
  case kWriteTo: \
    { \
      const OpReqType ReqType = kWriteTo; \
      {__VA_ARGS__} \
    } \
    break; \
  case kAddTo: \
    { \
      const OpReqType ReqType = kAddTo; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    break; \
  }
/*!
 * \brief Bind a compile-time constant `ndim` (1..5) from the runtime value
 * NDim and run the body. NDim == 0 is a silent no-op; NDim > 5 is fatal.
 */
#define MXNET_NDIM_SWITCH(NDim, ndim, ...) \
  if (NDim == 0) { \
  } else if (NDim == 1) { \
    const int ndim = 1; \
    {__VA_ARGS__} \
  } else if (NDim == 2) { \
    const int ndim = 2; \
    {__VA_ARGS__} \
  } else if (NDim == 3) { \
    const int ndim = 3; \
    {__VA_ARGS__} \
  } else if (NDim == 4) { \
    const int ndim = 4; \
    {__VA_ARGS__} \
  } else if (NDim == 5) { \
    const int ndim = 5; \
    {__VA_ARGS__} \
  } else { \
    /* was "ndim=" << NDim << "too large " -- missing separator space */ \
    LOG(FATAL) << "ndim=" << NDim << " too large"; \
  }
/*! \brief Type switch that binds `DType` for every supported dtype except
 * int8/uint8, which raise a fatal error (operation unsupported for them). */
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \
  switch (type) { \
  case mshadow::kFloat32: \
    { \
      typedef float DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat64: \
    { \
      typedef double DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kFloat16: \
    { \
      typedef mshadow::half::half_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kUint8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt8: \
    LOG(FATAL) << "This operation does not " \
                  "support int8 or uint8"; \
    break; \
  case mshadow::kInt32: \
    { \
      typedef int32_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  case mshadow::kInt64: \
    { \
      typedef int64_t DType; \
      {__VA_ARGS__} \
    } \
    break; \
  default: \
    LOG(FATAL) << "Unknown type enum " << type; \
  }
/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch: kWriteTo/kWriteInplace overwrite,
 * kAddTo accumulates, kNullOp (and unknown values) do nothing.
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val) \
  { \
    switch (req) { \
    case kNullOp: \
      break; \
    case kWriteTo: \
    case kWriteInplace: \
      (out) = (val); \
      break; \
    case kAddTo: \
      (out) += (val); \
      break; \
    default: \
      break; \
    } \
  }
/*! \brief add_enum entries for every supported dtype, for use in
 * parameter (dmlc::Parameter) field declarations */
#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)
/* \brief Compute flattened (row-major) index given coordinates and shape.
 * The factor (shape[i] > coord[i]) zeroes the contribution of any axis where
 * coord[i] >= shape[i] -- presumably so coordinates walked over a size-1
 * broadcast axis map to offset 0 (NOTE(review): confirm against callers). */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}
/* Compute row-major coordinates from a flattened index and a shape:
 * peel off the remainder of each axis, last axis first. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  int remainder = idx;
  #pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    const int quotient = remainder / shape[axis];
    coord[axis] = remainder - quotient * shape[axis];
    remainder = quotient;
  }
  return coord;
}
/* Dot product of a coordinate vector with a stride vector. */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int acc = 0;
  #pragma unroll
  for (int axis = 0; axis < ndim; ++axis) {
    acc += coord[axis] * stride[axis];
  }
  return acc;
}
/* Fused unravel + dot: convert the flat index to per-axis coordinates and
 * immediately accumulate coordinate * stride, without materializing a Shape. */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
                                const Shape<ndim>& stride) {
  int acc = 0;
  int remainder = idx;
  #pragma unroll
  for (int axis = ndim - 1; axis >= 0; --axis) {
    const int quotient = remainder / shape[axis];
    acc += (remainder - quotient * shape[axis]) * stride[axis];
    remainder = quotient;
  }
  return acc;
}
/* Calculate stride of each dim from shape (row-major). Axes of size <= 1
 * get stride 0, so any coordinate on them maps to the same element
 * (broadcasting). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}
/* Increment coordinates (last axis fastest) and keep the flat index `idx`
 * in sync, propagating carries to earlier axes when an axis overflows. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    // carry: reset this axis, bump the next-slower axis, and adjust idx
    // by the net stride change of the carry
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}
/* Same as inc() above, but keeps two flat indices (e.g. input and output
 * with different strides) in sync with the shared coordinate. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    // carry to the next-slower axis; adjust both indices by their own strides
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}
/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 * Same dtype: plain element copy; different dtypes: element-wise cast via
 * mshadow's tcast expression. Sizes and device must already match.
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}
/*! \brief Binary op backward gradient OP wrapper: chain rule helper that
 * multiplies the output gradient by GRAD_OP's local gradient. */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad (result is cast back to DType)
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};
/*! \brief Binary op backward gradient OP wrapper (tuned): same Map as
 * backward_grad, but tagged `tunable` so Kernel<> uses OMP tuning data. */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};
/*! \brief Select assignment operation based upon the req value
 * (via KERNEL_ASSIGN, with req a compile-time constant).
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch.
 * The Map overloads below cover the common arities of OP::Map.
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }
  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }
  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }
  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }
  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out,
                                  const DType *input_1,
                                  const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};
template<typename OP, typename xpu>
struct Kernel;
/*!
 * \brief CPU Kernel launcher: calls OP::Map for each i in [0, N), serially
 * or with OpenMP depending on the recommended thread count (and, for tuned
 * ops, the per-op tuning data).
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // not worth the OMP overhead: run serially
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }
  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * Note: PRIMITIVE_OP/DType only select the tuning data (UseOMP decision);
   * the work executed is still OP::Map.
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      static_cast<size_t>(N), static_cast<size_t>(omp_threads))) {
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }
  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition: OP::Map(start, count, args...)
   * receives a chunk of up to ceil(N/omp_threads) iterations.
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const int N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      const int length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < N; i += length) {
        // last chunk may be shorter than `length`
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }
  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }
  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const int N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};
#ifdef __CUDACC__
/*! \brief Generic GPU kernel: grid-stride loop calling OP::Map(i, args...) */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
/*! \brief Generic GPU kernel for the LaunchEx interface: each index is a
 * chunk of length 1, i.e. OP::Map(i, 1, args...) */
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}
/*! \brief GPU Kernel launcher: wraps the generic grid-stride kernels above */
template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    // ceil(N / kBaseThreadNum) blocks, capped at kMaxGridNum
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }
  /*! \brief Launch GPU kernel through the (start, count) Map interface */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif // __CUDACC__
/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate (cast to the output DType per element)
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};
/*!
* \brief Special-case kernel shortcut for setting to zero and one
*/
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
assignment.h | /* Portions Copyright 2019-2021 Xuesong Zhou and Peiheng Li, Cafer Avci
* If you help write or modify the code, please also list your names here.
* The reason of having Copyright info here is to ensure all the modified version, as a whole, under the GPL
* and further prevent a violation of the GPL.
*
* More about "How to use GNU licenses for your own software"
* http://www.gnu.org/licenses/gpl-howto.html
*/
// Peiheng, 02/03/21, remove them later after adopting better casting
#pragma warning(disable : 4305 4267 4018)
// stop warning: "conversion from 'int' to 'float', possible loss of data"
#pragma warning(disable: 4244)
#ifdef _WIN32
#include "pch.h"
#endif
#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <string>
#include <cstring>
#include <cstdio>
#include <ctime>
#include <cmath>
#include <algorithm>
#include <functional>
#include <stack>
#include <list>
#include <vector>
#include <map>
#include <omp.h>
#include "config.h"
#include "utils.h"
using std::max;
using std::min;
using std::cout;
using std::endl;
using std::string;
using std::vector;
using std::map;
using std::ifstream;
using std::ofstream;
using std::istringstream;
#include "DTA.h"
// Rebuild per-period / per-agent-type link volumes from the column (path) pool.
// iteration_index < 0: only clear the volumes. Otherwise each stored path adds
// its volume onto its links (scaled by the link/period/agent-type PCE factor).
// If b_self_reducing_path_volume, each non-fixed path keeps k/(k+1) of its
// volume so the next MSA shortest path receives a 1/(k+1) share of the flow.
void g_reset_and_update_link_volume_based_on_columns(int number_of_links, int iteration_index, bool b_self_reducing_path_volume)
{
// step 1: reset all link volumes
for (int i = 0; i < number_of_links; ++i)
{
for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
{
// used in travel time calculation
g_link_vector[i].flow_volume_per_period[tau] = 0;
// reserved for BPR-X
g_link_vector[i].queue_link_distance_in_km_perslot[tau] = 0;
for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
g_link_vector[i].volume_per_period_per_at[tau][at] = 0;
}
}
// step 2: tally path volumes onto links
if (iteration_index >= 0)
{
for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) // agent type m
{
//#pragma omp parallel for
std::map<int, CColumnPath>::iterator it;
int zone_size = g_zone_vector.size();
int tau_size = assignment.g_DemandPeriodVector.size();
float link_volume_contributed_by_path_volume;
int link_seq_no;
float PCE_ratio;
int nl;
std::map<int, CColumnPath>::iterator it_begin;
std::map<int, CColumnPath>::iterator it_end;
int column_vector_size;
CColumnVector* p_column_pool;
for (int orig = 0; orig < zone_size; ++orig) // origin zone o
{
for (int dest = 0; dest < zone_size; ++dest) // destination zone d
{
for (int tau = 0; tau < tau_size; ++tau) // demand period tau
{
p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
if (p_column_pool->od_volume > 0)
{
column_vector_size = p_column_pool->path_node_sequence_map.size();
it_begin = p_column_pool->path_node_sequence_map.begin();
it_end = p_column_pool->path_node_sequence_map.end();
// iterate over every path (column) stored for this OD pair
for (it = it_begin; it != it_end; ++it)
{
link_volume_contributed_by_path_volume = it->second.path_volume; // assign all OD flow to this first path
// add path volume to link volume
for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
{
link_seq_no = it->second.path_link_vector[nl];
// MSA updating for the existing column pools
// if iteration_index = 0; then update no flow discount is used (for the column pool case)
PCE_ratio = g_link_vector[link_seq_no].VDF_period[tau].pce[at]; // updated on 08/16/2021 for link dependent and agent type dependent pce factor mainly for trucks
//#pragma omp critical
{
g_link_vector[link_seq_no].flow_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
g_link_vector[link_seq_no].volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE
}
}
// this self-deducting action does not affect agents with fixed routing policies.
if (!p_column_pool->bfixed_route && b_self_reducing_path_volume)
{
// after the link volume "tally", self-deduct the path volume by 1/(k+1)
// (i.e. keep k/(k+1) of the previous flow) so that the following
// shortest path will be receiving a 1/(k+1) share of the flow
it->second.path_volume = it->second.path_volume * (float(iteration_index) / float(iteration_index + 1));
}
}
}
}
}
}
}
}
// Refresh every link's volume-dependent travel time (VDF) and the SO marginal
// cost per demand period and agent type, then return the network-wide total
// travel time summed over all links and demand periods.
double update_link_travel_time_and_cost(int inner_iteration_number)
{
    if (assignment.assignment_mode == 2)
    {
        // Simulation-based (time-dependent) delay computation is currently
        // disabled; the VDF-based travel time below is used for all modes.
    }

    // Phase 1: per-link update, parallelized across links.
#pragma omp parallel for
    for (int link_no = 0; link_no < g_link_vector.size(); ++link_no)
    {
        // step 1: travel time based on VDF
        g_link_vector[link_no].calculate_dynamic_VDFunction(inner_iteration_number, false, g_link_vector[link_no].VDF_type_no);

        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at)
            {
                // step 2: marginal cost for SO, scaled by the agent type's PCE
                float pce_of_agent_type = assignment.g_AgentTypeVector[at].PCE;
                g_link_vector[link_no].calculate_marginal_cost_for_agent_type(tau, at, pce_of_agent_type);
            }
        }
    }

    // Phase 2: tally the total travel time across all links and periods.
    double total_network_travel_time = 0;
    for (int link_no = 0; link_no < g_link_vector.size(); ++link_no)
    {
        for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau)
        {
            total_network_travel_time += g_link_vector[link_no].VDF_period[tau].total_travel_time;
        }
    }
    return total_network_travel_time;
}
// changes here are also for odmes, don't need to implement the changes in this function for now
// Resets link volumes and zone production/attraction estimates, re-accumulates
// them from the current column (path) pools with PCE = 1, recomputes each
// link's VDF, and then measures the deviation of estimated link counts and
// zone P/A against observations.
// @param number_of_links  number of links to reset/evaluate (prefix of g_link_vector)
// @param iteration_no     ODME iteration number (passed to the VDF update and log)
// @param system_gap       [out] mean signed relative link-count deviation (MPE-style)
// @return mean absolute relative link-count deviation (MAPE-style gap)
double g_reset_and_update_link_volume_based_on_ODME_columns(int number_of_links, int iteration_no, double& system_gap)
{
    float total_gap = 0;  // total absolute deviation (links + zones); only used in a commented-out log line below
    float sub_total_gap_link_count = 0;     // sum of |dev|/obs over observed links
    float sub_total_system_gap_count = 0;   // sum of signed dev/obs over observed links
    system_gap = 0;
    float sub_total_gap_P_count = 0;  // sum of signed relative production deviations
    float sub_total_gap_A_count = 0;  // sum of signed relative attraction deviations
    // reset the link volume
    for (int i = 0; i < number_of_links; ++i)
    {
        for (int tau = 0; tau < assignment.g_number_of_demand_periods; ++tau)
        {
            // used in travel time calculation
            g_link_vector[i].flow_volume_per_period[tau] = 0;
        }
    }
    // reset the estimated production and attraction
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        g_zone_vector[orig].est_attraction = 0;
        g_zone_vector[orig].est_production = 0;
    }
    // re-accumulate link volumes and zone P/A from every column in the pool
    for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
    {
        //#pragma omp parallel for
        std::map<int, CColumnPath>::iterator it;
        int zone_size = g_zone_vector.size();
        int tau_size = assignment.g_DemandPeriodVector.size();
        float link_volume_contributed_by_path_volume;
        int link_seq_no;
        float PCE_ratio;
        int nl;
        std::map<int, CColumnPath>::iterator it_begin;
        std::map<int, CColumnPath>::iterator it_end;
        int column_vector_size;
        CColumnVector* p_column_pool;
        for (int orig = 0; orig < zone_size; ++orig) // o
        {
            for (int dest = 0; dest < zone_size; ++dest) //d
            {
                for (int tau = 0; tau < tau_size; ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        // continuous: type 0
                        column_vector_size = p_column_pool->path_node_sequence_map.size();
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();
                        for (it = it_begin; it != it_end; ++it) // path k
                        {
                            link_volume_contributed_by_path_volume = it->second.path_volume; // assign all OD flow to this first path
                            // each path contributes its volume to the origin's production and destination's attraction
                            g_zone_vector[orig].est_production += it->second.path_volume;
                            g_zone_vector[dest].est_attraction += it->second.path_volume;
                            // add path volume to link volume
                            for (nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];
                                // MSA updating for the existing column pools
                                // if iteration_index = 0; then update no flow discount is used (for the column pool case)
                                // NOTE: for ODME, counts are matched in vehicle units, so PCE is fixed at 1
                                PCE_ratio = 1;
                                //#pragma omp critical
                                {
                                    g_link_vector[link_seq_no].flow_volume_per_period[tau] += link_volume_contributed_by_path_volume * PCE_ratio;
                                    g_link_vector[link_seq_no].volume_per_period_per_at[tau][at] += link_volume_contributed_by_path_volume; // pure volume, not consider PCE
                                }
                            }
                        }
                    }
                }
            }
        }
    }
    int total_link_count = 0;
    // calculate deviation for each measurement type
    for (int i = 0; i < number_of_links; ++i)
    {
        // refresh travel time from the newly accumulated volumes
        g_link_vector[i].calculate_dynamic_VDFunction(iteration_no,false, g_link_vector[i].VDF_type_no);
        if (g_link_vector[i].obs_count >= 1) // with data
        {
            // deviation is measured for demand period 0 only (estimated + preload - observed)
            int tau = 0;
            g_link_vector[i].est_count_dev = g_link_vector[i].flow_volume_per_period[tau] + g_link_vector[i].VDF_period[tau].preload - g_link_vector[i].obs_count;
            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "link " << g_node_vector[g_link_vector[i].from_node_seq_no].node_id
                    << "->" << g_node_vector[g_link_vector[i].to_node_seq_no].node_id
                    << "obs:, " << g_link_vector[i].obs_count << "est:, " << g_link_vector[i].flow_volume_per_period[tau]
                    << "dev:," << g_link_vector[i].est_count_dev << endl;
            }
            if (g_link_vector[i].upper_bound_flag == 0)
            {
                // equality-type observation: every deviation counts toward the gap
                total_gap += abs(g_link_vector[i].est_count_dev);
                sub_total_gap_link_count += fabs(g_link_vector[i].est_count_dev / g_link_vector[i].obs_count);
                sub_total_system_gap_count += g_link_vector[i].est_count_dev / g_link_vector[i].obs_count;
            }
            else
            { // upper bound constraints
                // only over-estimation (positive deviation) is penalized
                if (g_link_vector[i].est_count_dev > 0)
                {
                    total_gap += abs(g_link_vector[i].est_count_dev);
                    sub_total_gap_link_count += fabs(g_link_vector[i].est_count_dev / g_link_vector[i].obs_count);
                    sub_total_system_gap_count += g_link_vector[i].est_count_dev / g_link_vector[i].obs_count;
                }
            }
            total_link_count += 1;
        }
    }
    // zone-level deviations: attraction and production against observed targets
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        if (g_zone_vector[orig].obs_attraction >= 1) // with observation
        {
            g_zone_vector[orig].est_attraction_dev = g_zone_vector[orig].est_attraction - g_zone_vector[orig].obs_attraction;
            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "A: obs:" << g_zone_vector[orig].obs_attraction
                    << ",est:," << g_zone_vector[orig].est_attraction << ",dev:," << g_zone_vector[orig].est_attraction_dev << endl;
            }
            total_gap += abs(g_zone_vector[orig].est_attraction_dev);
            sub_total_gap_A_count += g_zone_vector[orig].est_attraction_dev / g_zone_vector[orig].obs_attraction;
        }
        if (g_zone_vector[orig].obs_production >= 1) // with observation
        {
            g_zone_vector[orig].est_production_dev = g_zone_vector[orig].est_production - g_zone_vector[orig].obs_production;
            if (dtalog.debug_level() == 2)
            {
                dtalog.output() << "zone " << g_zone_vector[orig].zone_id << "P: obs:" << g_zone_vector[orig].obs_production
                    << ",est:," << g_zone_vector[orig].est_production << ",dev:," << g_zone_vector[orig].est_production_dev << endl;
            }
            total_gap += abs(g_zone_vector[orig].est_production_dev);
            sub_total_gap_P_count += g_zone_vector[orig].est_production_dev / g_zone_vector[orig].obs_production;
        }
    }
    // report link-level MAPE and MPE; max(1, count) guards against division by zero
    dtalog.output() << "ODME #" << iteration_no/*<< " total abs gap= " << total_gap*/
        << " ,%link_MAPE: " << (sub_total_gap_link_count) / max(1, total_link_count) * 100 <<
        " ,%system_MPE: " << (sub_total_system_gap_count) / max(1, total_link_count) * 100 << endl;
    double gap = sub_total_gap_link_count / max(1, total_link_count);
    system_gap = sub_total_system_gap_count / max(1, total_link_count);
    return gap;
}
// One inner iteration of gradient-projection column updating:
// 1) rebuild link volumes from the current columns and refresh travel times;
// 2) recompute each column's toll/time/gradient cost;
// 3) shift flow from non-least-cost columns toward the least-cost column,
//    with a diminishing step size 1/(k+2) and a 50% per-column shift cap.
// @param assignment              global assignment state (column pools, vectors)
// @param inner_iteration_number  inner iteration index k (drives step size and logging)
void g_update_gradient_cost_and_assigned_flow_in_column_pool(Assignment& assignment, int inner_iteration_number)
{
    double total_system_cost_gap = 0;     // sum over columns of (cost - least cost) * volume
    double total_system_travel_cost = 0;  // sum over columns of cost * volume

    // we can have a recursive formula to reupdate the current link volume by a factor of k/(k+1),
    // and use the newly generated path flow to add the additional 1/(k+1)
    g_reset_and_update_link_volume_based_on_columns(g_link_vector.size(), inner_iteration_number, false);
    // step 4: based on newly calculated path volume, update volume-based travel time and resource balance, update gradient
    update_link_travel_time_and_cost(inner_iteration_number);
    // step 0
    //step 1: calculate shortest path at inner iteration of column flow updating
#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int column_vector_size;
        float least_gradient_cost = 999999;
        int least_gradient_cost_path_seq_no = -1;
        int least_gradient_cost_path_node_sum_index = -1;
        double path_toll = 0;
        double path_gradient_cost = 0;
        double path_distance = 0;
        double path_travel_time = 0;
        int link_seq_no;
        double link_travel_time;
        double total_switched_out_path_volume = 0;
        double step_size = 0;
        double previous_path_volume = 0;
        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        column_vector_size = p_column_pool->path_node_sequence_map.size();
                        // scan through the map with different node sum for different paths
                        /// step 1: update gradient cost for each column path
                        least_gradient_cost = 999999;
                        least_gradient_cost_path_seq_no = -1;
                        least_gradient_cost_path_node_sum_index = -1;
                        it_begin = p_column_pool->path_node_sequence_map.begin();
                        it_end = p_column_pool->path_node_sequence_map.end();
                        for (it = it_begin; it != it_end; ++it)
                        {
                            path_toll = 0;
                            path_gradient_cost = 0;
                            path_distance = 0;
                            path_travel_time = 0;
                            // accumulate toll, distance, time, and first-order gradient cost along the path
                            for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                            {
                                link_seq_no = it->second.path_link_vector[nl];
                                path_toll += g_link_vector[link_seq_no].VDF_period[tau].toll[at];
                                path_distance += g_link_vector[link_seq_no].link_distance_in_km;
                                link_travel_time = g_link_vector[link_seq_no].travel_time_per_period[tau];
                                path_travel_time += link_travel_time;
                                path_gradient_cost += g_link_vector[link_seq_no].get_generalized_first_order_gradient_cost_of_second_order_loss_for_agent_type(tau, at);
                            }
                            it->second.path_toll = path_toll;
                            it->second.path_travel_time = path_travel_time;
                            it->second.path_gradient_cost = path_gradient_cost;
                            it->second.path_gradient_cost_per_iteration_map[inner_iteration_number] = path_gradient_cost;
                            if (column_vector_size == 1) // only one path: no flow shifting is possible
                            {
                                total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);
                                break;
                            }
                            if (path_gradient_cost < least_gradient_cost)
                            {
                                least_gradient_cost = path_gradient_cost;
                                least_gradient_cost_path_seq_no = it->second.path_seq_no;
                                least_gradient_cost_path_node_sum_index = it->first;
                            }
                        }
                        if (column_vector_size >= 2)
                        {
                            // step 2: calculate gradient cost difference for each column path
                            total_switched_out_path_volume = 0;
                            for (it = it_begin; it != it_end; ++it)
                            {
                                if (it->second.path_seq_no != least_gradient_cost_path_seq_no) //for non-least cost path
                                {
                                    it->second.path_gradient_cost_difference = it->second.path_gradient_cost - least_gradient_cost;
                                    it->second.path_gradient_cost_relative_difference = it->second.path_gradient_cost_difference / max(0.0001f, least_gradient_cost);
                                    total_system_cost_gap += (it->second.path_gradient_cost_difference * it->second.path_volume);
                                    total_system_travel_cost += (it->second.path_gradient_cost * it->second.path_volume);
                                    // diminishing step size 1/(k+2), scaled by OD volume
                                    step_size = 1.0 / (inner_iteration_number + 2) * p_column_pool->od_volume;
                                    previous_path_volume = it->second.path_volume;
                                    double flow_shift = step_size * it->second.path_gradient_cost_relative_difference;
                                    // cap the shift at half the column's current volume
                                    if (flow_shift > it->second.path_volume * 0.5)
                                    {
                                        flow_shift = it->second.path_volume * 0.5;
                                    }
                                    //recall that it->second.path_gradient_cost_difference >=0
                                    // step 3.1: shift flow from nonshortest path to shortest path
                                    it->second.path_volume = max(0.0, it->second.path_volume - flow_shift);
                                    it->second.path_volume_per_iteration_map[inner_iteration_number] = it->second.path_volume;
                                    //we use min(step_size to ensure a path is not switching more than 1/n proportion of flow
                                    it->second.path_switch_volume = (previous_path_volume - it->second.path_volume);
                                    total_switched_out_path_volume += (previous_path_volume - it->second.path_volume);
                                }
                            }
                            //step 3.2 consider least cost path, receive all volume shifted from non-shortest path
                            if (least_gradient_cost_path_seq_no != -1)
                            {
                                p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume += total_switched_out_path_volume;
                                p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume_per_iteration_map[inner_iteration_number] = p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume;
                                total_system_travel_cost += (p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_gradient_cost *
                                    p_column_pool->path_node_sequence_map[least_gradient_cost_path_node_sum_index].path_volume);
                            }
                        }
                    }
                }
            }
        }
    }
    dtalog.output() << "column updating: iteration= " << inner_iteration_number << ", total_gap=" << total_system_cost_gap
        << ",total_relative_gap=" << total_system_cost_gap / max(0.00001, total_system_travel_cost) << endl;
}
// Driver for the inner column-updating phase: runs the requested number of
// gradient-projection iterations, optionally tracing per-link flow after each.
// @param assignment                 global assignment state
// @param column_updating_iterations number of inner column-updating passes
void g_column_pool_optimization(Assignment& assignment, int column_updating_iterations)
{
    for (int iteration = 0; iteration < column_updating_iterations; ++iteration)
    {
        g_update_gradient_cost_and_assigned_flow_in_column_pool(assignment, iteration);

        if (dtalog.debug_level() < 3)
            continue;

        // verbose trace: dump the demand-period-0 flow on every link
        for (int link_no = 0; link_no < g_link_vector.size(); ++link_no)
        {
            dtalog.output() << "link: " << g_node_vector[g_link_vector[link_no].from_node_seq_no].node_id << "-->"
                << g_node_vector[g_link_vector[link_no].to_node_seq_no].node_id << ", "
                << "flow count:" << g_link_vector[link_no].flow_volume_per_period[0] << endl;
        }
    }
}
// For agent types with real-time information (VMS case): for each first-stage
// column that both (a) passes through an information zone and (b) traverses a
// link with a capacity reduction in period tau, splice the path into
// stage-1 prefix (up to the info zone) + the first available stage-2 path from
// the info zone's column pool, and carry the flow over to the stage-2 pool.
// Fixes over the previous version:
//  - delete[] (not delete) on arrays allocated with new[] (was UB);
//  - guard against dereferencing it2 == end() when the stage-2 pool is empty.
void g_column_pool_route_scheduling(Assignment& assignment, int inner_iteration_number)
{
    //step 1: calculate shortest path at inner iteration of column flow updating
#pragma omp parallel for
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        std::map<int, CColumnPath>::iterator it, it_begin, it_end;
        int link_seq_no;
        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        if (assignment.g_AgentTypeVector[at].real_time_information == 1) // case of VMS
                        {
                            it_begin = p_column_pool->path_node_sequence_map.begin();
                            it_end = p_column_pool->path_node_sequence_map.end();
                            //test condition 1: passing through information zone
                            bool b_passing_information_zone = false;
                            int new_orig_zone_id = 0;
                            // NOTE(review): link_seq_vector and both flags persist across all
                            // first-stage paths scanned below, so a second qualifying path keeps
                            // appending to the same vector -- confirm this accumulation is intended.
                            std::vector <int> link_seq_vector;
                            //test condition 2: passing through capacity impact area
                            bool b_passing_capacity_impact_area = false;
                            for (it = it_begin; it != it_end; ++it) // scan each first-stage original path
                            {
                                // skip near-zero-volume columns
                                if (it->second.path_volume < 0.00001)
                                    continue;
                                for (int nl = 0; nl < it->second.m_link_size; ++nl) // arc a
                                {
                                    link_seq_no = it->second.path_link_vector[nl];
                                    CLink* pCurrentLink = &(g_link_vector[link_seq_no]);
                                    if (b_passing_information_zone == false &&
                                        assignment.node_seq_no_2_info_zone_id_mapping.find(pCurrentLink->to_node_seq_no) != assignment.node_seq_no_2_info_zone_id_mapping.end()) // this node been defined as zone
                                    {
                                        int zone_id = assignment.node_seq_no_2_info_zone_id_mapping[pCurrentLink->to_node_seq_no];
                                        int zone_no = assignment.g_zoneid_to_zone_seq_no_mapping[zone_id];
                                        if (assignment.zone_seq_no_2_info_mapping.find(zone_no) != assignment.zone_seq_no_2_info_mapping.end()) // as information zone
                                        {
                                            b_passing_information_zone = true;
                                            new_orig_zone_id = zone_id; // zone id to zone no.
                                            // copy the existing link sequence up to the downstream
                                            // node corresponding to the info zone
                                            for (int nl2 = 0; nl2 <= nl; ++nl2) // arc a
                                            {
                                                link_seq_vector.push_back(it->second.path_link_vector[nl2]);
                                            }
                                        }
                                    }
                                    if (pCurrentLink->capacity_reduction_map.find(tau) != pCurrentLink->capacity_reduction_map.end())
                                    {
                                        b_passing_capacity_impact_area = true;
                                    }
                                }
                                if (b_passing_capacity_impact_area == true && b_passing_information_zone == true)
                                {
                                    CColumnVector* p_2_stage_column_pool;
                                    int info_orig = assignment.g_zoneid_to_zone_seq_no_mapping[new_orig_zone_id];
                                    //step 2: fetch the related column pool from the information node/zone
                                    p_2_stage_column_pool = &(assignment.g_column_pool[info_orig][dest][at][tau]); // we come from info_orig but going to the same destination with same at, and assignment period tau
                                    // scan through the map with different node sum for different continuous paths
                                    std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2;
                                    it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                                    it_end2 = p_2_stage_column_pool->path_node_sequence_map.end();
                                    for (it2 = it_begin2; it2 != it_end2; ++it2)
                                    {
                                        // append the stage-2 path, excluding the virtual link at position 0
                                        for (int nl = 1; nl < it2->second.m_link_size; ++nl) // arc a
                                        {
                                            link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                                        }
                                        break; // only connect with the first available second stage path
                                    }
                                    if (it->second.path_link_vector != NULL)
                                    {
                                        // copy the updated path (stage1 + stage 2) back to the path link vector
                                        delete[] it->second.path_link_vector; // FIX: array form matches new int[...]
                                        it->second.path_link_vector = new int[link_seq_vector.size()];
                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_link_vector[l] = link_seq_vector[l];
                                        }
                                        it->second.m_link_size = link_seq_vector.size();
                                        // copy the updated path (stage1 + stage 2) back to the path node vector
                                        delete[] it->second.path_node_vector; // FIX: array form matches new int[...]
                                        it->second.path_node_vector = new int[link_seq_vector.size() + 1];
                                        // first node
                                        it->second.path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;
                                        // remaining nodes to the end of path
                                        for (int l = 0; l < link_seq_vector.size(); l++)
                                        {
                                            it->second.path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                                        }
                                        it->second.m_node_size = link_seq_vector.size() + 1;
                                    }
                                    p_2_stage_column_pool->od_volume += it->second.path_volume;// carry over the switching path flow to the second path volume count
                                    p_2_stage_column_pool->information_type = 1;
                                    // FIX: only credit the stage-2 column if one exists; the original
                                    // dereferenced it2 == end() when the stage-2 pool was empty
                                    if (it2 != it_end2)
                                        it2->second.path_volume += it->second.path_volume;// carry over the switching path flow to the second path volume count
                                } // two conditions satisfied
                            } //end of scanning for the first stage path in the column pool
                        } // agent type is real time agent type
                    } // with positive OD volume
                } // tau
            } //agent type
        } //dest
    } // orig
    dtalog.output() << " updating";
}
// For OD pairs defined as activity chains: rebuilds a single through-path by
// concatenating the first available path of each consecutive activity leg
// (orig -> z1 -> ... -> dest), then stores it as the only column for the OD
// pair, assigning it the full OD volume.
// @param assignment             global assignment state (column pools, vectors)
// @param inner_iteration_number unused here; kept for signature consistency with
//                               the other scheduling routines
void g_column_pool_activity_scheduling(Assignment& assignment, int inner_iteration_number)
{
    //step 1: calculate shortest path at inner iteration of column flow updating
    for (int orig = 0; orig < g_zone_vector.size(); ++orig) // o
    {
        CColumnVector* p_column_pool;
        for (int dest = 0; dest < g_zone_vector.size(); ++dest) //d
        {
            for (int at = 0; at < assignment.g_AgentTypeVector.size(); ++at) //m
            {
                for (int tau = 0; tau < assignment.g_DemandPeriodVector.size(); ++tau) //tau
                {
                    p_column_pool = &(assignment.g_column_pool[orig][dest][at][tau]);
                    if (p_column_pool->od_volume > 0)
                    {
                        if (p_column_pool->activity_zone_no_vector.size()) // case of activity zones
                        {
                            // NOTE(review): clearing the map drops previously allocated
                            // path_link_vector/path_node_vector arrays; if CColumnPath does
                            // not delete[] them in its destructor this leaks -- confirm.
                            p_column_pool->path_node_sequence_map.clear(); // remove existing single OD pair based routes
                            int aat = p_column_pool->activity_agent_type_no;
                            std::vector <int> link_seq_vector;
                            // go through each consecutive activity OD leg; index 0 is the
                            // origin and the last entry is the final destination
                            for (int az = 0; az < p_column_pool->activity_zone_no_vector.size() - 1; az++)
                            {
                                CColumnVector* p_2_stage_column_pool;
                                int activity_orig = p_column_pool->activity_zone_no_vector[az];
                                int activity_dest = p_column_pool->activity_zone_no_vector[az + 1];
                                //step 2: fetch the column pool for this leg (activity agent type aat, period tau)
                                p_2_stage_column_pool = &(assignment.g_column_pool[activity_orig][activity_dest][aat][tau]);
                                // scan through the map with different node sum for different continuous paths
                                std::map<int, CColumnPath>::iterator it2, it_begin2, it_end2;
                                it_begin2 = p_2_stage_column_pool->path_node_sequence_map.begin();
                                it_end2 = p_2_stage_column_pool->path_node_sequence_map.end();
                                for (it2 = it_begin2; it2 != it_end2; ++it2)
                                {
                                    // exclude the virtual links at the beginning and at the end of the leg
                                    for (int nl = 1; nl < it2->second.m_link_size - 1; ++nl) // arc a
                                    {
                                        link_seq_vector.push_back(it2->second.path_link_vector[nl]);
                                    }
                                    break; // only connect with the first available second stage path
                                }
                            }
                            if (link_seq_vector.size() == 0)
                            {
                                // no continuous path could be assembled for this chain
                                continue;
                            }
                            // key the new column by the sum of its link sequence numbers
                            int node_sum = 0;
                            for (int l = 0; l < link_seq_vector.size(); l++)
                            {
                                node_sum += link_seq_vector[l];
                            }
                            // add this unique path // later we can add k activity paths
                            int path_count = p_column_pool->path_node_sequence_map.size();
                            p_column_pool->path_node_sequence_map[node_sum].path_seq_no = path_count;
                            p_column_pool->path_node_sequence_map[node_sum].path_volume = p_column_pool->od_volume;
                            p_column_pool->path_node_sequence_map[node_sum].path_toll = 0;
                            p_column_pool->path_node_sequence_map[node_sum].path_link_vector = new int[link_seq_vector.size()];
                            p_column_pool->path_node_sequence_map[node_sum].path_node_vector = new int[link_seq_vector.size() + 1];
                            for (int l = 0; l < link_seq_vector.size(); l++)
                            {
                                p_column_pool->path_node_sequence_map[node_sum].path_link_vector[l] = link_seq_vector[l];
                                p_column_pool->path_node_sequence_map[node_sum].path_link_STL_vector.push_back(link_seq_vector[l]);
                            }
                            p_column_pool->path_node_sequence_map[node_sum].m_link_size = link_seq_vector.size();
                            // copy the assembled path back to the path node vector
                            // first node
                            p_column_pool->path_node_sequence_map[node_sum].path_node_vector[0] = g_link_vector[link_seq_vector[0]].from_node_seq_no;
                            // remaining nodes to the end of path
                            for (int l = 0; l < link_seq_vector.size(); l++)
                            {
                                p_column_pool->path_node_sequence_map[node_sum].path_node_vector[l + 1] = g_link_vector[link_seq_vector[l]].to_node_seq_no;
                            }
                            p_column_pool->path_node_sequence_map[node_sum].m_node_size = link_seq_vector.size() + 1;
                        } //end of conditions for activity chain
                    } // with positive OD volume
                } // tau
            } //agent type
        } //dest
    } // orig
    dtalog.output() << " updating";
}
|
NeighborhoodGraph.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_NG_H_
#define _SPTAG_COMMON_NG_H_
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "Dataset.h"
#include "FineGrainedLock.h"
#include "QueryResultSet.h"
#include <atomic>
#include <chrono>
#include <queue>
#include <vector>
#if defined(GPU)
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <typeinfo>
#include <cuda_fp16.h>
#include "inc/Core/Common/cuda/KNN.hxx"
#include "inc/Core/Common/cuda/params.h"
#endif
namespace SPTAG
{
namespace COMMON
{
class NeighborhoodGraph
{
public:
// Default-constructs the graph with the default build parameters below.
// Values are typically overridden from index parameters before BuildGraph.
NeighborhoodGraph() : m_iTPTNumber(32),          // number of TPT trees built to seed the KNN graph
    m_iTPTLeafSize(2000),                        // max points per TPT leaf before recursion stops
    m_iSamples(1000),                            // sample count used when estimating split statistics
    m_numTopDimensionTPTSplit(5),                // top-variance dimensions combined per split hyperplane
    m_iNeighborhoodSize(32),                     // neighbors kept per node in the final graph
    m_fNeighborhoodScale(2.0),
    m_fCEFScale(2.0),
    m_fRNGFactor(1.0),
    m_iRefineIter(2),                            // refinement passes over the initial graph
    m_iCEF(1000),                                // candidate set size for refinement/accuracy checks
    m_iAddCEF(500),
    m_iMaxCheckForRefineGraph(10000),
    m_iGPUGraphType(2),                          // GPU-only build options below
    m_iGPURefineSteps(0),
    m_iGPURefineDepth(2),
    m_iGPULeafSize(500),
    m_iheadNumGPUs(1),
    m_iTPTBalanceFactor(2),
    m_rebuild(0)
{}
~NeighborhoodGraph() {}

// Insert candidate insertNode (at distance insertDist) into node's neighbor list; implemented by concrete graph types.
virtual void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) = 0;
// Rebuild node's neighbor list in 'nodes' from the numResults query candidates; implemented by concrete graph types.
virtual void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) = 0;
// Estimates the accuracy of the stored neighborhood graph by brute force:
// for each of 'samples' random nodes, compute exact distances to all points,
// rebuild the reference neighborhood from the top m_iCEF candidates, and
// count how many reference neighbors appear in the stored graph row.
// @param index   vector index providing samples and the distance function
// @param samples number of random nodes to evaluate
// @param idmap   optional remap table; points present in it are skipped
// @return fraction of matching neighbors in [0, 1]
// Change vs. previous version: raw new[]/delete[] buffers replaced with
// std::vector (exception-safe, no manual delete; behavior identical).
virtual float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
{
    std::vector<DimensionType> correct(samples);
#pragma omp parallel for schedule(dynamic)
    for (SizeType i = 0; i < samples; i++)
    {
        SizeType x = COMMON::Utils::rand(m_iGraphSize);
        //int x = i;
        COMMON::QueryResultSet<void> query(nullptr, m_iCEF);
        for (SizeType y = 0; y < m_iGraphSize; y++)
        {
            // skip points that have been remapped/deleted
            if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue;
            float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y));
            query.AddPoint(y, dist);
        }
        query.SortResult();
        std::vector<SizeType> exact_rng(m_iNeighborhoodSize);
        RebuildNeighbors(index, x, exact_rng.data(), query.GetResults(), m_iCEF);
        correct[i] = 0;
        for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
            if (exact_rng[j] == -1) {
                // reference list exhausted: remaining slots count as matches
                correct[i] += m_iNeighborhoodSize - j;
                break;
            }
            for (DimensionType k = 0; k < m_iNeighborhoodSize; k++)
                if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) {
                    correct[i]++;
                    break;
                }
        }
    }
    float acc = 0;
    for (SizeType i = 0; i < samples; i++) acc += float(correct[i]);
    acc = acc / samples / m_iNeighborhoodSize;
    return acc;
}
#if defined(GPU)
// GPU path: builds (and refines) the entire KNN/RNG graph in one call to the
// CUDA builder, then remaps node ids through idmap if provided.
// @param index vector index supplying the data
// @param idmap optional old-id -> new-id remap applied to every neighbor entry
template <typename T>
void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
{
    SizeType initSize;
    // read the initial dynamic pivot count from the index's string parameters
    SPTAG::Helper::Convert::ConvertStringTo(index->GetParameter("NumberOfInitialDynamicPivots").c_str(), initSize);
    // Build the entire RNG graph, both builds the KNN and refines it to RNG
    buildGraph<T>(index, m_iGraphSize, m_iNeighborhoodSize, m_iTPTNumber, (int*)m_pNeighborhoodGraph[0], m_iGPURefineSteps, m_iGPURefineDepth, m_iGPUGraphType, m_iGPULeafSize, initSize, m_iheadNumGPUs, m_iTPTBalanceFactor);
    if (idmap != nullptr) {
        // rewrite every neighbor id that appears in the remap table
        std::unordered_map<SizeType, SizeType>::const_iterator iter;
        for (SizeType i = 0; i < m_iGraphSize; i++) {
            for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) {
                if ((iter = idmap->find(m_pNeighborhoodGraph[i][j])) != idmap->end())
                    m_pNeighborhoodGraph[i][j] = iter->second;
            }
        }
    }
}
#else
// Dispatch wrapper for TPT partitioning: when a quantizer is active, the split
// math must run on reconstructed vectors, so dispatch on the quantizer's
// reconstruct type via the DefinitionList X-macro; otherwise partition
// directly on the raw type T.
template <typename T>
void PartitionByTptree(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
    std::vector<std::pair<SizeType, SizeType>>& leaves)
{
    if (COMMON::DistanceUtils::Quantizer)
    {
        switch (COMMON::DistanceUtils::Quantizer->GetReconstructType())
        {
// expands to one case per supported vector value type
#define DefineVectorValueType(Name, Type) \
case VectorValueType::Name: \
PartitionByTptreeCore<T, Type>(index, indices, first, last, leaves); \
break;
#include "inc/Core/DefinitionList.h"
#undef DefineVectorValueType
        default: break;
        }
    }
    else
    {
        PartitionByTptreeCore<T, T>(index, indices, first, last, leaves);
    }
}
// Recursively partitions indices[first..last] into TPT leaves of at most
// m_iTPTLeafSize points, recorded as (first, last) pairs in 'leaves'.
// Each split projects points onto a random linear combination of the
// m_numTopDimensionTPTSplit highest-variance dimensions, keeping the
// combination (of iIteration random trials) with the largest projected
// variance, and partitions around its mean.
// T: stored value type; R: type the split math runs on (reconstructed type
// when a quantizer is active, otherwise T).
template <typename T, typename R>
void PartitionByTptreeCore(VectorIndex* index, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
    std::vector<std::pair<SizeType, SizeType>>& leaves)
{
    if (last - first <= m_iTPTLeafSize)
    {
        // small enough: record as a leaf
        leaves.emplace_back(first, last);
    }
    else
    {
        SizeType cols = index->GetFeatureDim();
        bool quantizer_exists = (bool)COMMON::DistanceUtils::Quantizer;
        // scratch buffer for reconstructed vectors when a quantizer is active
        R* v_holder = nullptr;
        if (quantizer_exists) {
            cols = COMMON::DistanceUtils::Quantizer->ReconstructDim();
            v_holder = (R*)_mm_malloc(COMMON::DistanceUtils::Quantizer->ReconstructSize(), ALIGN_SPTAG);
        }
        std::vector<float> Mean(cols, 0);
        int iIteration = 100;  // number of random hyperplane trials per split
        // statistics are computed on a sample of at most m_iSamples points
        SizeType end = min(first + m_iSamples, last);
        SizeType count = end - first + 1;
        // calculate the mean of each dimension
        for (SizeType j = first; j <= end; j++)
        {
            R* v;
            if (quantizer_exists)
            {
                COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)index->GetSample(indices[j]), v_holder);
                v = v_holder;
            }
            else
            {
                v = (R*)index->GetSample(indices[j]);
            }
            for (DimensionType k = 0; k < cols; k++)
            {
                Mean[k] += v[k];
            }
        }
        for (DimensionType k = 0; k < cols; k++)
        {
            Mean[k] /= count;
        }
        // per-dimension variance, stored as (dimension id, variance) so it can
        // be sorted to find the top-variance dimensions
        std::vector<BasicResult> Variance;
        Variance.reserve(cols);
        for (DimensionType j = 0; j < cols; j++)
        {
            Variance.emplace_back(j, 0.0f);
        }
        // calculate the variance of each dimension
        for (SizeType j = first; j <= end; j++)
        {
            R* v;
            if (quantizer_exists)
            {
                COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)index->GetSample(indices[j]), v_holder);
                v = v_holder;
            }
            else
            {
                v = (R*)index->GetSample(indices[j]);
            }
            for (DimensionType k = 0; k < cols; k++)
            {
                float dist = v[k] - Mean[k];
                Variance[k].Dist += dist * dist;
            }
        }
        std::sort(Variance.begin(), Variance.end(), COMMON::Compare);
        // indexs: the m_numTopDimensionTPTSplit highest-variance dimensions;
        // weight/bestweight: candidate and best hyperplane coefficients
        std::vector<SizeType> indexs(m_numTopDimensionTPTSplit);
        std::vector<float> weight(m_numTopDimensionTPTSplit), bestweight(m_numTopDimensionTPTSplit);
        float bestvariance = Variance[cols - 1].Dist;
        for (int i = 0; i < m_numTopDimensionTPTSplit; i++)
        {
            indexs[i] = Variance[cols - 1 - i].VID;
            bestweight[i] = 0;
        }
        // baseline: axis-aligned split on the single highest-variance dimension
        bestweight[0] = 1;
        float bestmean = Mean[indexs[0]];
        std::vector<float> Val(count);
        // try iIteration random unit-norm weight vectors; keep the one whose
        // projection has the largest variance over the sample
        for (int i = 0; i < iIteration; i++)
        {
            float sumweight = 0;
            for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
            {
                // random coefficient in (-1, 1)
                weight[j] = float(rand() % 10000) / 5000.0f - 1.0f;
                sumweight += weight[j] * weight[j];
            }
            sumweight = sqrt(sumweight);
            // normalize to unit length
            for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
            {
                weight[j] /= sumweight;
            }
            float mean = 0;
            for (SizeType j = 0; j < count; j++)
            {
                Val[j] = 0;
                R* v;
                if (quantizer_exists)
                {
                    COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)index->GetSample(indices[first + j]), v_holder);
                    v = v_holder;
                }
                else
                {
                    v = (R*)index->GetSample(indices[first + j]);
                }
                for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
                {
                    Val[j] += weight[k] * v[indexs[k]];
                }
                mean += Val[j];
            }
            mean /= count;
            float var = 0;
            for (SizeType j = 0; j < count; j++)
            {
                float dist = Val[j] - mean;
                var += dist * dist;
            }
            if (var > bestvariance)
            {
                bestvariance = var;
                bestmean = mean;
                for (int j = 0; j < m_numTopDimensionTPTSplit; j++)
                {
                    bestweight[j] = weight[j];
                }
            }
        }
        // Hoare-style partition: points whose projection is below bestmean go
        // left, the rest go right
        SizeType i = first;
        SizeType j = last;
        // decide which child one point belongs
        while (i <= j)
        {
            float val = 0;
            R* v;
            if (quantizer_exists)
            {
                COMMON::DistanceUtils::Quantizer->ReconstructVector((uint8_t*)index->GetSample(indices[i]), v_holder);
                v = v_holder;
            }
            else
            {
                v = (R*)index->GetSample(indices[i]);
            }
            for (int k = 0; k < m_numTopDimensionTPTSplit; k++)
            {
                val += bestweight[k] * v[indexs[k]];
            }
            if (val < bestmean)
            {
                i++;
            }
            else
            {
                std::swap(indices[i], indices[j]);
                j--;
            }
        }
        // if all the points in the node are equal,equally split the node into 2
        if ((i == first) || (i == last + 1))
        {
            i = (first + last + 1) / 2;
        }
        // release local scratch before recursing to bound peak memory during
        // deep recursion
        Mean.clear();
        Variance.clear();
        Val.clear();
        indexs.clear();
        weight.clear();
        bestweight.clear();
        if (v_holder) _mm_free(v_holder);
        PartitionByTptreeCore<T, R>(index, indices, first, i - 1, leaves);
        PartitionByTptreeCore<T, R>(index, indices, i, last, leaves);
    }
}
        // Build the initial approximate KNN graph: partition all vectors with
        // m_iTPTNumber random TP-trees (built in PartitionByTptree), then
        // brute-force all pairwise distances inside each leaf and keep the best
        // m_iNeighborhoodSize candidates per node via AddNeighbor.
        // idmap (optional) remaps internal vector ids to external ids before a
        // neighbor is recorded — presumably for head/SSD index id translation;
        // confirm against the callers.
        template <typename T>
        void BuildInitKNNGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap)
        {
            // Per-node distances of the current best neighbors; initialized to
            // MaxDist so any real distance replaces them.
            COMMON::Dataset<float> NeighborhoodDists(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
            // One permutation of all node ids per tree, and the (first,last)
            // index ranges of each resulting leaf.
            std::vector<std::vector<SizeType>> TptreeDataIndices(m_iTPTNumber, std::vector<SizeType>(m_iGraphSize));
            std::vector<std::vector<std::pair<SizeType, SizeType>>> TptreeLeafNodes(m_iTPTNumber, std::vector<std::pair<SizeType, SizeType>>());
            for (SizeType i = 0; i < m_iGraphSize; i++)
                for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                    (NeighborhoodDists)[i][j] = MaxDist;
            auto t1 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition begin\n");
#pragma omp parallel for schedule(dynamic)
            for (int i = 0; i < m_iTPTNumber; i++)
            {
                // Stagger thread start and reseed so each tree gets a
                // different shuffle. NOTE(review): std::srand mutates global
                // state inside a parallel region and std::random_shuffle is
                // removed in C++17 — consider per-thread std::mt19937 +
                // std::shuffle; confirm the project's language level.
                Sleep(i * 100); std::srand(clock());
                for (SizeType j = 0; j < m_iGraphSize; j++) TptreeDataIndices[i][j] = j;
                std::random_shuffle(TptreeDataIndices[i].begin(), TptreeDataIndices[i].end());
                PartitionByTptree<T>(index, TptreeDataIndices[i], 0, m_iGraphSize - 1, TptreeLeafNodes[i]);
                LOG(Helper::LogLevel::LL_Info, "Finish Getting Leaves for Tree %d\n", i);
            }
            LOG(Helper::LogLevel::LL_Info, "Parallel TpTree Partition done\n");
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Build TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());
            // For every tree, exhaustively connect all point pairs inside each
            // leaf; leaves are processed in parallel, trees sequentially.
            for (int i = 0; i < m_iTPTNumber; i++)
            {
#pragma omp parallel for schedule(dynamic)
                for (SizeType j = 0; j < (SizeType)TptreeLeafNodes[i].size(); j++)
                {
                    SizeType start_index = TptreeLeafNodes[i][j].first;
                    SizeType end_index = TptreeLeafNodes[i][j].second;
                    // Coarse progress logging (roughly every 20% of leaves).
                    if ((j * 5) % TptreeLeafNodes[i].size() == 0) LOG(Helper::LogLevel::LL_Info, "Processing Tree %d %d%%\n", i, static_cast<int>(j * 1.0 / TptreeLeafNodes[i].size() * 100));
                    // end_index is inclusive: y runs up to and including it.
                    for (SizeType x = start_index; x < end_index; x++)
                    {
                        for (SizeType y = x + 1; y <= end_index; y++)
                        {
                            SizeType p1 = TptreeDataIndices[i][x];
                            SizeType p2 = TptreeDataIndices[i][y];
                            // Distance is computed on the internal ids; the
                            // idmap translation applies only to the stored ids.
                            float dist = index->ComputeDistance(index->GetSample(p1), index->GetSample(p2));
                            if (idmap != nullptr) {
                                p1 = (idmap->find(p1) == idmap->end()) ? p1 : idmap->at(p1);
                                p2 = (idmap->find(p2) == idmap->end()) ? p2 : idmap->at(p2);
                            }
                            // Insert symmetrically into both candidate lists.
                            COMMON::Utils::AddNeighbor(p2, dist, (m_pNeighborhoodGraph)[p1], (NeighborhoodDists)[p1], m_iNeighborhoodSize);
                            COMMON::Utils::AddNeighbor(p1, dist, (m_pNeighborhoodGraph)[p2], (NeighborhoodDists)[p2], m_iNeighborhoodSize);
                        }
                    }
                }
                // Release this tree's workspace before building with the next.
                TptreeDataIndices[i].clear();
                TptreeLeafNodes[i].clear();
            }
            TptreeDataIndices.clear();
            TptreeLeafNodes.clear();
            auto t3 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Process TPTree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t2).count());
        }
#endif
        // Build the full RNG graph for all samples currently in the index:
        // initialize storage with an inflated neighborhood, seed it with a
        // TP-tree KNN graph (skipped for tiny datasets), refine it, and
        // optionally run the indegree-balancing rebuild pass.
        template <typename T>
        void BuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
        {
            LOG(Helper::LogLevel::LL_Info, "build RNG graph!\n");
            m_iGraphSize = index->GetNumSamples();
            // Temporarily inflate the neighborhood: scaled by
            // m_fNeighborhoodScale for refinement headroom, and doubled when
            // m_rebuild is set (RebuildGraph halves it again). RefineGraph
            // divides the scale factor back out at the end.
            m_iNeighborhoodSize = (DimensionType)(ceil(m_iNeighborhoodSize * m_fNeighborhoodScale) * (m_rebuild + 1));
            m_pNeighborhoodGraph.Initialize(m_iGraphSize, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
            // Small graphs: skip the TP-tree initialization, refinement alone
            // (which searches the index) is sufficient and cheap.
            if (m_iGraphSize < 1000) {
                RefineGraph<T>(index, idmap);
                LOG(Helper::LogLevel::LL_Info, "Build RNG Graph end!\n");
                return;
            }
            auto t1 = std::chrono::high_resolution_clock::now();
            BuildInitKNNGraph<T>(index, idmap);
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "BuildInitKNNGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count());
            RefineGraph<T>(index, idmap);
            auto t3 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "BuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t3 - t1).count());
            if (m_rebuild) {
                // Drop back to the target width before the rebuild pass
                // (rows still physically hold 2x entries).
                m_iNeighborhoodSize = m_iNeighborhoodSize / 2;
                RebuildGraph<T>(index, idmap);
                auto t4 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "ReBuildGraph time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t4 - t3).count());
            }
            if (idmap != nullptr) {
                // Negative keys carry special links: for key -1-k, store the
                // mapped id (encoded as -2-value) in row k's last slot —
                // presumably a cross-partition pointer; verify with callers.
                for (auto iter = idmap->begin(); iter != idmap->end(); iter++)
                    if (iter->first < 0)
                    {
                        m_pNeighborhoodGraph[-1 - iter->first][m_iNeighborhoodSize - 1] = -2 - iter->second;
                    }
            }
        }
        // Rebalance the graph by in-degree: for each node, prefer to keep tail
        // neighbors whose in-degree is still below a threshold, so that
        // popular nodes do not dominate everyone's neighbor lists. Called by
        // BuildGraph after m_iNeighborhoodSize was halved, so each physical
        // row holds 2 * m_iNeighborhoodSize candidate slots.
        template <typename T>
        void RebuildGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
        {
            std::vector<int> indegree(m_iGraphSize);
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++) indegree[i] = 0;
            auto t0 = std::chrono::high_resolution_clock::now();
            // Count in-degrees over the first (kept) half of every row.
            // NOTE(review): indegree[node]++ from multiple threads without
            // atomics is a data race; counts are approximate at best — confirm
            // whether this is an accepted heuristic here.
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                SizeType* outnodes = m_pNeighborhoodGraph[i];
                for (SizeType j = 0; j < m_iNeighborhoodSize; j++)
                {
                    int node = outnodes[j];
                    if (node >= 0) {
                        indegree[node]++;
                    }
                }
            }
            auto t1 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Calculate Indegree time (s): %lld\n", std::chrono::duration_cast<std::chrono::seconds>(t1 - t0).count());
            int rebuild_threshold = m_iNeighborhoodSize / 2;
            int rebuildstart = m_iNeighborhoodSize / 2;
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < m_iGraphSize; i++)
            {
                SizeType* outnodes = m_pNeighborhoodGraph[i];
                // reserve[j] marks candidate slots (over the double-width row)
                // selected to survive for this node.
                std::vector<bool> reserve(2 * m_iNeighborhoodSize, false);
                int total = 0;
                // First pass: keep low-indegree candidates from the tail half.
                for (SizeType j = rebuildstart; j < m_iNeighborhoodSize * 2; j++)
                    if ( outnodes[j] >= 0 && indegree[outnodes[j]] < rebuild_threshold) {
                        reserve[j] = true;
                        total++;
                    }
                // Second pass: top up with arbitrary remaining candidates
                // until enough slots are reserved to fill [rebuildstart, N).
                for (SizeType j = rebuildstart; j < m_iNeighborhoodSize * 2 && total < m_iNeighborhoodSize - rebuildstart; j++) {
                    if (!reserve[j]) {
                        reserve[j] = true;
                        total++;
                    }
                }
                // Compact the reserved candidates into the kept tail
                // [rebuildstart, m_iNeighborhoodSize), adjusting in-degrees
                // for the replaced/kept nodes as we go.
                for (SizeType j = rebuildstart, z = rebuildstart; j < m_iNeighborhoodSize; j++) {
                    while (!reserve[z]) z++;
                    if(outnodes[j] >= 0) indegree[outnodes[j]] = indegree[outnodes[j]] - 1;
                    if(outnodes[z] >= 0) indegree[outnodes[z]] = indegree[outnodes[z]] + 1;
                    outnodes[j] = outnodes[z];
                    z++;
                }
                if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Rebuild %d%%\n", static_cast<int>(i * 1.0 / m_iGraphSize * 100));
            }
            auto t2 = std::chrono::high_resolution_clock::now();
            LOG(Helper::LogLevel::LL_Info, "Rebuild RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
        }
        // Refine every node's neighbor list in place for m_iRefineIter rounds.
        // The first m_iRefineIter-1 rounds use a widened candidate budget
        // (m_iCEF * m_fCEFScale) over the inflated neighborhood; before the
        // final round the neighborhood is shrunk back to its target width
        // (side effect on m_iNeighborhoodSize — callers rely on this).
        template <typename T>
        void RefineGraph(VectorIndex* index, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
        {
            for (int iter = 0; iter < m_iRefineIter - 1; iter++)
            {
                auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < m_iGraphSize; i++)
                {
                    RefineNode<T>(index, i, false, false, (int)(m_iCEF * m_fCEFScale));
                    if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", iter, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                }
                auto t2 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
            }
            // Undo the scale applied in BuildGraph before the final pass.
            m_iNeighborhoodSize = (DimensionType)(m_iNeighborhoodSize / m_fNeighborhoodScale);
            if (m_iRefineIter > 0) {
                auto t1 = std::chrono::high_resolution_clock::now();
#pragma omp parallel for schedule(dynamic)
                for (SizeType i = 0; i < m_iGraphSize; i++)
                {
                    // Final pass uses the unscaled candidate budget m_iCEF.
                    RefineNode<T>(index, i, false, false, m_iCEF);
                    if ((i * 5) % m_iGraphSize == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d %d%%\n", m_iRefineIter - 1, static_cast<int>(i * 1.0 / m_iGraphSize * 100));
                }
                auto t2 = std::chrono::high_resolution_clock::now();
                LOG(Helper::LogLevel::LL_Info, "Refine RNG time (s): %lld Graph Acc: %f\n", std::chrono::duration_cast<std::chrono::seconds>(t2 - t1).count(), GraphAccuracyEstimation(index, 100, idmap));
            }
            else {
                LOG(Helper::LogLevel::LL_Info, "Graph Acc: %f\n", GraphAccuracyEstimation(index, 100, idmap));
            }
        }
        // Build a refined graph over a subset of vectors (given by indices),
        // writing into newGraph (a temporary instance is created when null)
        // and optionally serializing it to output. reverseIndices translates
        // full-index ids back to subset ids; idmap applies a final remapping
        // (negative keys encode special links, same scheme as BuildGraph).
        template <typename T>
        ErrorCode RefineGraph(VectorIndex* index, std::vector<SizeType>& indices, std::vector<SizeType>& reverseIndices,
            std::shared_ptr<Helper::DiskPriorityIO> output, NeighborhoodGraph* newGraph, const std::unordered_map<SizeType, SizeType>* idmap = nullptr)
        {
            // tmp keeps a heap instance alive when the caller passed nullptr.
            std::shared_ptr<NeighborhoodGraph> tmp;
            if (newGraph == nullptr) {
                tmp = NeighborhoodGraph::CreateInstance(Type());
                newGraph = tmp.get();
            }
            // Local R shadows the member function R(); it is the subset size.
            SizeType R = (SizeType)indices.size();
            newGraph->m_pNeighborhoodGraph.Initialize(R, m_iNeighborhoodSize, index->m_iDataBlockSize, index->m_iDataCapacity);
            newGraph->m_iGraphSize = R;
            newGraph->m_iNeighborhoodSize = m_iNeighborhoodSize;
#pragma omp parallel for schedule(dynamic)
            for (SizeType i = 0; i < R; i++)
            {
                if ((i * 5) % R == 0) LOG(Helper::LogLevel::LL_Info, "Refine %d%%\n", static_cast<int>(i * 1.0 / R * 100));
                SizeType* outnodes = newGraph->m_pNeighborhoodGraph[i];
                // Search the full index from this subset vector, then rebuild
                // its neighbor list from the top m_iCEF+1 results.
                COMMON::QueryResultSet<T> query((const T*)index->GetSample(indices[i]), m_iCEF + 1);
                index->RefineSearchIndex(query, false);
                RebuildNeighbors(index, indices[i], outnodes, query.GetResults(), m_iCEF + 1);
                std::unordered_map<SizeType, SizeType>::const_iterator iter;
                for (DimensionType j = 0; j < m_iNeighborhoodSize; j++)
                {
                    // Translate full-index ids into subset ids (the >= 0 guard
                    // makes the signed/unsigned size() comparison safe), then
                    // apply the optional id remapping.
                    if (outnodes[j] >= 0 && outnodes[j] < reverseIndices.size()) outnodes[j] = reverseIndices[outnodes[j]];
                    if (idmap != nullptr && (iter = idmap->find(outnodes[j])) != idmap->end()) outnodes[j] = iter->second;
                }
                // Negative-key entry for this row: store the encoded special
                // link in the last neighbor slot (see BuildGraph).
                if (idmap != nullptr && (iter = idmap->find(-1 - i)) != idmap->end())
                    outnodes[m_iNeighborhoodSize - 1] = -2 - iter->second;
            }
            if (output != nullptr) newGraph->SaveGraph(output);
            return ErrorCode::Success;
        }
        // Re-select the neighbors of a single node: run a refine search from
        // the node's own vector with a candidate budget of CEF+1 and rebuild
        // its neighbor list from the results. When updateNeighbors is set,
        // this node is also inserted into each result's neighbor list.
        template <typename T>
        void RefineNode(VectorIndex* index, const SizeType node, bool updateNeighbors, bool searchDeleted, int CEF)
        {
            COMMON::QueryResultSet<T> query((const T*)index->GetSample(node), CEF + 1);
            void* rec_query = nullptr;
            if (COMMON::DistanceUtils::Quantizer) {
                // Quantized index: reconstruct the full-precision vector into
                // an aligned scratch buffer and search with that instead.
                // Manually paired with _mm_free below — keep them matched.
                rec_query = _mm_malloc(COMMON::DistanceUtils::Quantizer->ReconstructSize(), ALIGN_SPTAG);
                COMMON::DistanceUtils::Quantizer->ReconstructVector((const uint8_t*)query.GetTarget(), rec_query);
                query.SetTarget((T*)rec_query);
            }
            index->RefineSearchIndex(query, searchDeleted);
            RebuildNeighbors(index, node, m_pNeighborhoodGraph[node], query.GetResults(), CEF + 1);
            if (rec_query)
            {
                _mm_free(rec_query);
            }
            if (updateNeighbors) {
                // update neighbors
                for (int j = 0; j <= CEF; j++)
                {
                    BasicResult* item = query.GetResult(j);
                    // Results are packed; a negative VID marks the end.
                    if (item->VID < 0) break;
                    if (item->VID == node) continue;
                    InsertNeighbors(index, item->VID, node, item->Dist);
                }
            }
        }
        // Number of bytes needed to serialize the graph (forwards to the
        // underlying Dataset).
        inline std::uint64_t BufferSize() const
        {
            return m_pNeighborhoodGraph.BufferSize();
        }
ErrorCode LoadGraph(std::shared_ptr<Helper::DiskPriorityIO> input, SizeType blockSize, SizeType capacity)
{
ErrorCode ret = ErrorCode::Success;
if ((ret = m_pNeighborhoodGraph.Load(input, blockSize, capacity)) != ErrorCode::Success) return ret;
m_iGraphSize = m_pNeighborhoodGraph.R();
m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
return ret;
}
ErrorCode LoadGraph(std::string sGraphFilename, SizeType blockSize, SizeType capacity)
{
ErrorCode ret = ErrorCode::Success;
if ((ret = m_pNeighborhoodGraph.Load(sGraphFilename, blockSize, capacity)) != ErrorCode::Success) return ret;
m_iGraphSize = m_pNeighborhoodGraph.R();
m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
return ret;
}
ErrorCode LoadGraph(char* pGraphMemFile, SizeType blockSize, SizeType capacity)
{
ErrorCode ret = ErrorCode::Success;
if ((ret = m_pNeighborhoodGraph.Load(pGraphMemFile, blockSize, capacity)) != ErrorCode::Success) return ret;
m_iGraphSize = m_pNeighborhoodGraph.R();
m_iNeighborhoodSize = m_pNeighborhoodGraph.C();
return ErrorCode::Success;
}
        // Save the graph to a file: open a binary output stream via the IO
        // factory and delegate to the stream-based overload.
        ErrorCode SaveGraph(std::string sGraphFilename) const
        {
            LOG(Helper::LogLevel::LL_Info, "Save %s To %s\n", m_pNeighborhoodGraph.Name().c_str(), sGraphFilename.c_str());
            auto ptr = f_createIO();
            if (ptr == nullptr || !ptr->Initialize(sGraphFilename.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
            return SaveGraph(ptr);
        }
        // Serialize the graph: row count, neighborhood width, then each row's
        // neighbor ids. IOBINARY returns an error code from the enclosing
        // function on a short write.
        ErrorCode SaveGraph(std::shared_ptr<Helper::DiskPriorityIO> output) const
        {
            IOBINARY(output, WriteBinary, sizeof(SizeType), (char*)&m_iGraphSize);
            IOBINARY(output, WriteBinary, sizeof(DimensionType), (char*)&m_iNeighborhoodSize);
            for (int i = 0; i < m_iGraphSize; i++)
                IOBINARY(output, WriteBinary, sizeof(SizeType) * m_iNeighborhoodSize, (char*)m_pNeighborhoodGraph[i]);
            LOG(Helper::LogLevel::LL_Info, "Save %s (%d,%d) Finish!\n", m_pNeighborhoodGraph.Name().c_str(), m_iGraphSize, m_iNeighborhoodSize);
            return ErrorCode::Success;
        }
inline ErrorCode AddBatch(SizeType num)
{
ErrorCode ret = m_pNeighborhoodGraph.AddBatch(num);
if (ret != ErrorCode::Success) return ret;
m_iGraphSize += num;
return ErrorCode::Success;
}
        // Direct (unlocked) access to a row's neighbor id array.
        inline SizeType* operator[](SizeType index) { return m_pNeighborhoodGraph[index]; }
        inline const SizeType* operator[](SizeType index) const { return m_pNeighborhoodGraph[index]; }
        // Thread-safe single-entry write, guarded by the per-row lock.
        void Update(SizeType row, DimensionType col, SizeType val) {
            std::lock_guard<std::mutex> lock(m_dataUpdateLock[row]);
            m_pNeighborhoodGraph[row][col] = val;
        }
        // Set the logical row count on both the storage and the cached size.
        inline void SetR(SizeType rows) {
            m_pNeighborhoodGraph.SetR(rows);
            m_iGraphSize = rows;
        }
        // Current number of rows (nodes) in the graph.
        inline SizeType R() const { return m_iGraphSize; }
        // Graph type name of the underlying dataset (e.g. used by the factory).
        inline std::string Type() const { return m_pNeighborhoodGraph.Name(); }
        // Factory: create a graph instance by type name (defined elsewhere).
        static std::shared_ptr<NeighborhoodGraph> CreateInstance(std::string type);
    protected:
        // Graph structure
        SizeType m_iGraphSize;
        // Row i holds node i's neighbor ids; negative values are sentinels.
        COMMON::Dataset<SizeType> m_pNeighborhoodGraph;
        // Per-row locks used by Update().
        FineGrainedLock m_dataUpdateLock;
    public:
        // TP-tree build parameters (tree count, leaf size, sampling, split dims).
        int m_iTPTNumber, m_iTPTLeafSize, m_iSamples, m_numTopDimensionTPTSplit;
        // Target neighbors per node; temporarily inflated during BuildGraph.
        DimensionType m_iNeighborhoodSize;
        float m_fNeighborhoodScale, m_fCEFScale, m_fRNGFactor;
        // Refinement/GPU tuning knobs; m_rebuild enables the indegree rebuild pass.
        int m_iRefineIter, m_iCEF, m_iAddCEF, m_iMaxCheckForRefineGraph, m_iGPUGraphType, m_iGPURefineSteps, m_iGPURefineDepth, m_iGPULeafSize, m_iheadNumGPUs, m_iTPTBalanceFactor, m_rebuild;
};
}
}
#endif
|
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/ASTContext.h"
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// Representation of an OpenMP canonical loop.
///
/// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape
/// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form
/// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form
/// OpenMP 4.0, section 2.6 Canonical Loop Form
/// OpenMP 4.5, section 2.6 Canonical Loop Form
/// OpenMP 5.0, section 2.9.1 Canonical Loop Form
/// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form
///
/// An OpenMP canonical loop is a for-statement or range-based for-statement
/// with additional requirements that ensure that the number of iterations is
/// known before entering the loop and allow skipping to an arbitrary iteration.
/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
/// known to fulfill OpenMP's canonical loop requirements because of being
/// associated to an OMPLoopBasedDirective. That is, the general structure is:
///
/// OMPLoopBasedDirective
/// [`- CapturedStmt ]
/// [ `- CapturedDecl]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- Stmt
///
/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
/// directives such as OMPParallelForDirective, but others do not need them
/// (such as OMPTileDirective). The OMPCanonicalLoop and
/// ForStmt/CXXForRangeStmt pair is repeated for each loop associated with the
/// directive. An OMPCanonicalLoop must not appear in the AST unless associated
/// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the
/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
///
/// [...]
/// ` OMPCanonicalLoop
/// `- ForStmt/CXXForRangeStmt
/// `- CompoundStmt
/// |- Leading in-between code (if any)
/// |- OMPCanonicalLoop
/// | `- ForStmt/CXXForRangeStmt
/// | `- ...
/// `- Trailing in-between code (if any)
///
/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
/// to avoid confusion which loop belongs to the nesting.
///
/// There are three different kinds of iteration variables for different
/// purposes:
/// * Loop user variable: The user-accessible variable with different value for
/// each iteration.
/// * Loop iteration variable: The variable used to identify a loop iteration;
/// for range-based for-statement, this is the hidden iterator '__begin'. For
/// other loops, it is identical to the loop user variable. Must be a
/// random-access iterator, pointer or integer type.
/// * Logical iteration counter: Normalized loop counter starting at 0 and
/// incrementing by one at each iteration. Allows abstracting over the type
/// of the loop iteration variable and is always an unsigned integer type
/// appropriate to represent the range of the loop iteration variable. Its
/// value corresponds to the logical iteration number in the OpenMP
/// specification.
///
/// This AST node provides two captured statements:
/// * The distance function which computes the number of iterations.
/// * The loop user variable function that computes the loop user variable when
/// given a logical iteration number.
///
/// These captured statements provide the link between C/C++ semantics and the
/// logical iteration counters used by the OpenMPIRBuilder which is
/// language-agnostic and therefore does not know e.g. how to advance a
/// random-access iterator. The OpenMPIRBuilder will use this information to
/// apply simd, workshare-loop, distribute, taskloop and loop directives to the
/// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an
/// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an
/// OMPLoopDirective and skipped when searching for the associated syntactical
/// loop.
///
/// Example:
/// <code>
/// std::vector<std::string> Container{1,2,3};
/// for (std::string Str : Container)
/// Body(Str);
/// </code>
/// which is syntactic sugar for approximately:
/// <code>
/// auto &&__range = Container;
/// auto __begin = std::begin(__range);
/// auto __end = std::end(__range);
/// for (; __begin != __end; ++__begin) {
/// std::string Str = *__begin;
/// Body(Str);
/// }
/// </code>
/// In this example, the loop user variable is `Str`, the loop iteration
/// variable is `__begin` of type `std::vector<std::string>::iterator` and the
/// logical iteration number type is `size_t` (unsigned version of
/// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`).
/// Therefore, the distance function will be
/// <code>
/// [&](size_t &Result) { Result = __end - __begin; }
/// </code>
/// and the loop variable function is
/// <code>
/// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
/// Result = __begin + Logical;
/// }
/// </code>
/// The variable `__begin`, aka the loop iteration variable, is captured by
/// value because it is modified in the loop body, but both functions require
/// the initial value. The OpenMP specification explicitly leaves unspecified
/// when the loop expressions are evaluated such that a capture by reference is
/// sufficient.
class OMPCanonicalLoop : public Stmt {
  // The serializers read/write SubStmts directly.
  friend class ASTStmtReader;
  friend class ASTStmtWriter;
  /// Children of this AST node.
  enum {
    LOOP_STMT,
    DISTANCE_FUNC,
    LOOPVAR_FUNC,
    LOOPVAR_REF,
    LastSubStmt = LOOPVAR_REF
  };
private:
  /// This AST node's children.
  Stmt *SubStmts[LastSubStmt + 1] = {};
  /// Instances are built only through create()/createEmpty().
  OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}
public:
  /// Create a new OMPCanonicalLoop.
  static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
                                  CapturedStmt *DistanceFunc,
                                  CapturedStmt *LoopVarFunc,
                                  DeclRefExpr *LoopVarRef) {
    OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
    S->setLoopStmt(LoopStmt);
    S->setDistanceFunc(DistanceFunc);
    S->setLoopVarFunc(LoopVarFunc);
    S->setLoopVarRef(LoopVarRef);
    return S;
  }
  /// Create an empty OMPCanonicalLoop for deserialization.
  static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
    return new (Ctx) OMPCanonicalLoop();
  }
  static bool classof(const Stmt *S) {
    return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
  }
  /// The source range is that of the wrapped loop statement.
  SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
  SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }
  /// Return this AST node's children.
  /// @{
  child_range children() {
    return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  const_child_range children() const {
    return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
  }
  /// @}
  /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
  /// @{
  Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
  const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
  void setLoopStmt(Stmt *S) {
    assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
           "Canonical loop must be a for loop (range-based or otherwise)");
    SubStmts[LOOP_STMT] = S;
  }
  /// @}
  /// The function that computes the number of loop iterations. Can be evaluated
  /// before entering the loop but after the syntactical loop's init
  /// statement(s).
  ///
  /// Function signature: void(LogicalTy &Result)
  /// Any values necessary to compute the distance are captures of the closure.
  /// @{
  CapturedStmt *getDistanceFunc() {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  const CapturedStmt *getDistanceFunc() const {
    return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
  }
  void setDistanceFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[DISTANCE_FUNC] = S;
  }
  /// @}
  /// The function that computes the loop user variable from a logical iteration
  /// counter. Can be evaluated as first statement in the loop.
  ///
  /// Function signature: void(LoopVarTy &Result, LogicalTy Number)
  /// Any other values required to compute the loop user variable (such as start
  /// value, step size) are captured by the closure. In particular, the initial
  /// value of loop iteration variable is captured by value to be unaffected by
  /// previous iterations.
  /// @{
  CapturedStmt *getLoopVarFunc() {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  const CapturedStmt *getLoopVarFunc() const {
    return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
  }
  void setLoopVarFunc(CapturedStmt *S) {
    assert(S && "Expected non-null captured statement");
    SubStmts[LOOPVAR_FUNC] = S;
  }
  /// @}
  /// Reference to the loop user variable as accessed in the loop body.
  /// @{
  DeclRefExpr *getLoopVarRef() {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  const DeclRefExpr *getLoopVarRef() const {
    return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
  }
  void setLoopVarRef(DeclRefExpr *E) {
    assert(E && "Expected non-null loop variable");
    SubStmts[LOOPVAR_REF] = E;
  }
  /// @}
};
/// This is a basic class for representing single OpenMP executable
/// directive.
///
class OMPExecutableDirective : public Stmt {
friend class ASTStmtReader;
friend class ASTStmtWriter;
/// Kind of the directive.
OpenMPDirectiveKind Kind = llvm::omp::OMPD_unknown;
/// Starting location of the directive (directive keyword).
SourceLocation StartLoc;
/// Ending location of the directive.
SourceLocation EndLoc;
/// Get the clauses storage.
MutableArrayRef<OMPClause *> getClauses() {
if (!Data)
return llvm::None;
return Data->getClauses();
}
protected:
/// Data, associated with the directive.
OMPChildren *Data = nullptr;
/// Build instance of directive of class \a K.
///
/// \param SC Statement class.
/// \param K Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
///
OMPExecutableDirective(StmtClass SC, OpenMPDirectiveKind K,
SourceLocation StartLoc, SourceLocation EndLoc)
: Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
EndLoc(std::move(EndLoc)) {}
template <typename T, typename... Params>
static T *createDirective(const ASTContext &C, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(Clauses.size(), AssociatedStmt,
NumChildren),
alignof(T));
auto *Data = OMPChildren::Create(reinterpret_cast<T *>(Mem) + 1, Clauses,
AssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T, typename... Params>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt, unsigned NumChildren,
Params &&... P) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T(std::forward<Params>(P)...);
Inst->Data = Data;
return Inst;
}
template <typename T>
static T *createEmptyDirective(const ASTContext &C, unsigned NumClauses,
bool HasAssociatedStmt = false,
unsigned NumChildren = 0) {
void *Mem =
C.Allocate(sizeof(T) + OMPChildren::size(NumClauses, HasAssociatedStmt,
NumChildren),
alignof(T));
auto *Data =
OMPChildren::CreateEmpty(reinterpret_cast<T *>(Mem) + 1, NumClauses,
HasAssociatedStmt, NumChildren);
auto *Inst = new (Mem) T;
Inst->Data = Data;
return Inst;
}
public:
/// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator
: public llvm::iterator_adaptor_base<
used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator,
std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> {
ArrayRef<OMPClause *>::iterator End;
OMPClause::child_iterator ChildI, ChildEnd;
void MoveToNext() {
if (ChildI != ChildEnd)
return;
while (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
if (ChildI != ChildEnd)
return;
}
}
}
public:
explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses)
: used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
MoveToNext();
}
}
Stmt *operator*() const { return *ChildI; }
Stmt *operator->() const { return **this; }
used_clauses_child_iterator &operator++() {
++ChildI;
if (ChildI != ChildEnd)
return *this;
if (this->I != End) {
++this->I;
if (this->I != End) {
ChildI = (*this->I)->used_children().begin();
ChildEnd = (*this->I)->used_children().end();
}
}
MoveToNext();
return *this;
}
};
static llvm::iterator_range<used_clauses_child_iterator>
used_clauses_children(ArrayRef<OMPClause *> Clauses) {
return {used_clauses_child_iterator(Clauses),
used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))};
}
/// Iterates over a filtered subrange of clauses applied to a
/// directive.
///
/// This iterator visits only clauses of type SpecificClause.
template <typename SpecificClause>
class specific_clause_iterator
: public llvm::iterator_adaptor_base<
specific_clause_iterator<SpecificClause>,
ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
const SpecificClause *, ptrdiff_t, const SpecificClause *,
const SpecificClause *> {
ArrayRef<OMPClause *>::const_iterator End;
void SkipToNextClause() {
while (this->I != End && !isa<SpecificClause>(*this->I))
++this->I;
}
public:
explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
: specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
End(Clauses.end()) {
SkipToNextClause();
}
const SpecificClause *operator*() const {
return cast<SpecificClause>(*this->I);
}
const SpecificClause *operator->() const { return **this; }
specific_clause_iterator &operator++() {
++this->I;
SkipToNextClause();
return *this;
}
};
template <typename SpecificClause>
static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
return {specific_clause_iterator<SpecificClause>(Clauses),
specific_clause_iterator<SpecificClause>(
llvm::makeArrayRef(Clauses.end(), 0))};
}
template <typename SpecificClause>
llvm::iterator_range<specific_clause_iterator<SpecificClause>>
getClausesOfKind() const {
return getClausesOfKind<SpecificClause>(clauses());
}
/// Gets a single clause of the specified kind associated with the
/// current directive iff there is only one clause of this kind (and assertion
/// is fired if there is more than one clause is associated with the
/// directive). Returns nullptr if no clause of this kind is associated with
/// the directive.
template <typename SpecificClause>
const SpecificClause *getSingleClause() const {
auto Clauses = getClausesOfKind<SpecificClause>();
if (Clauses.begin() != Clauses.end()) {
assert(std::next(Clauses.begin()) == Clauses.end() &&
"There are at least 2 clauses of the specified kind");
return *Clauses.begin();
}
return nullptr;
}
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
bool hasClausesOfKind() const {
auto Clauses = getClausesOfKind<SpecificClause>();
return Clauses.begin() != Clauses.end();
}
/// Returns starting location of directive kind.
SourceLocation getBeginLoc() const { return StartLoc; }
/// Returns ending location of directive.
SourceLocation getEndLoc() const { return EndLoc; }
/// Set starting location of directive kind.
///
/// \param Loc New starting location of directive.
///
void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
/// Set ending location of directive.
///
/// \param Loc New ending location of directive.
///
void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
/// Get number of clauses.
unsigned getNumClauses() const {
if (!Data)
return 0;
return Data->getNumClauses();
}
/// Returns specified clause.
///
/// \param I Number of clause.
///
OMPClause *getClause(unsigned I) const { return clauses()[I]; }
/// Returns true if directive has associated statement.
bool hasAssociatedStmt() const { return Data && Data->hasAssociatedStmt(); }
/// Returns statement associated with the directive.
const Stmt *getAssociatedStmt() const {
return const_cast<OMPExecutableDirective *>(this)->getAssociatedStmt();
}
Stmt *getAssociatedStmt() {
assert(hasAssociatedStmt() &&
"Expected directive with the associated statement.");
return Data->getAssociatedStmt();
}
/// Returns the captured statement associated with the
/// component region within the (combined) directive.
///
/// \param RegionKind Component region kind.
const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  // The capture regions implied by this directive's kind determine which
  // nested CapturedStmt corresponds to \p RegionKind.
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  return Data->getCapturedStmt(RegionKind, CaptureRegions);
}
/// Get innermost captured statement for the construct.
CapturedStmt *getInnermostCapturedStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind());
  return Data->getInnermostCapturedStmt(CaptureRegions);
}
/// Const overload of getInnermostCapturedStmt().
const CapturedStmt *getInnermostCapturedStmt() const {
  return const_cast<OMPExecutableDirective *>(this)
      ->getInnermostCapturedStmt();
}
/// Returns the OpenMP directive kind of this node.
OpenMPDirectiveKind getDirectiveKind() const { return Kind; }
static bool classof(const Stmt *S) {
  // All OpenMP executable directive statement classes occupy a contiguous
  // range of the StmtClass enumeration.
  return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
         S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
}
child_range children() {
  // Directives without child storage have no children to iterate.
  if (!Data)
    return child_range(child_iterator(), child_iterator());
  return Data->getAssociatedStmtAsRange();
}
const_child_range children() const {
  return const_cast<OMPExecutableDirective *>(this)->children();
}
/// Returns the list of clauses, or an empty list when there is no
/// clause/child storage.
ArrayRef<OMPClause *> clauses() const {
  if (!Data)
    return llvm::None;
  return Data->getClauses();
}
/// Returns whether or not this is a Standalone directive.
///
/// Stand-alone directives are executable directives
/// that have no associated user code.
bool isStandaloneDirective() const;
/// Returns the AST node representing OpenMP structured-block of this
/// OpenMP executable directive.
/// Prerequisite: Executable Directive must not be Standalone directive.
const Stmt *getStructuredBlock() const {
  return const_cast<OMPExecutableDirective *>(this)->getStructuredBlock();
}
Stmt *getStructuredBlock();
/// Returns the associated statement as stored, via Data->getRawStmt().
/// Requires an associated statement.
const Stmt *getRawStmt() const {
  return const_cast<OMPExecutableDirective *>(this)->getRawStmt();
}
Stmt *getRawStmt() {
  assert(hasAssociatedStmt() &&
         "Expected directive with the associated statement.");
  return Data->getRawStmt();
}
};
/// This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPParallelDirective()
      : OMPExecutableDirective(OMPParallelDirectiveClass,
                               llvm::omp::OMPD_parallel, SourceLocation(),
                               SourceLocation()) {}
  /// Sets special task reduction descriptor.
  /// The descriptor occupies the first (and only) extra child slot.
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null when no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Const overload of getTaskReductionRefExpr().
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelDirective *>(this)->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// The base class for all loop-based directives, including loop transformation
/// directives.
class OMPLoopBasedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

protected:
  /// Number of collapsed loops as specified by 'collapse' clause.
  unsigned NumAssociatedLoops = 0;
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param NumAssociatedLoops Number of loops associated with the construct.
  ///
  OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                        SourceLocation StartLoc, SourceLocation EndLoc,
                        unsigned NumAssociatedLoops)
      : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
        NumAssociatedLoops(NumAssociatedLoops) {}

public:
  /// The expressions built to support OpenMP loops in combined/composite
  /// pragmas (e.g. pragma omp distribute parallel for)
  struct DistCombinedHelperExprs {
    /// DistributeLowerBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *LB;
    /// DistributeUpperBound - used when composing 'omp distribute' with
    /// 'omp for' in a same construct.
    Expr *UB;
    /// DistributeEnsureUpperBound - used when composing 'omp distribute'
    /// with 'omp for' in a same construct, EUB depends on DistUB
    Expr *EUB;
    /// Distribute loop iteration variable init used when composing 'omp
    /// distribute'
    /// with 'omp for' in a same construct
    Expr *Init;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct
    Expr *Cond;
    /// Update of LowerBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NLB;
    /// Update of UpperBound for statically scheduled omp loops for
    /// outer loop in combined constructs (e.g. 'distribute parallel for')
    Expr *NUB;
    /// Distribute Loop condition used when composing 'omp distribute'
    /// with 'omp for' in a same construct when schedule is chunked.
    Expr *DistCond;
    /// 'omp parallel for' loop condition used when composed with
    /// 'omp distribute' in the same construct and when schedule is
    /// chunked and the chunk size is 1.
    Expr *ParForInDistCond;
  };
  /// The expressions built for the OpenMP loop CodeGen for the
  /// whole collapsed loop nest.
  struct HelperExprs {
    /// Loop iteration variable.
    Expr *IterationVarRef;
    /// Loop last iteration number.
    Expr *LastIteration;
    /// Loop number of iterations.
    Expr *NumIterations;
    /// Calculation of last iteration.
    Expr *CalcLastIteration;
    /// Loop pre-condition.
    Expr *PreCond;
    /// Loop condition.
    Expr *Cond;
    /// Loop iteration variable init.
    Expr *Init;
    /// Loop increment.
    Expr *Inc;
    /// IsLastIteration - local flag variable passed to runtime.
    Expr *IL;
    /// LowerBound - local variable passed to runtime.
    Expr *LB;
    /// UpperBound - local variable passed to runtime.
    Expr *UB;
    /// Stride - local variable passed to runtime.
    Expr *ST;
    /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
    Expr *EUB;
    /// Update of LowerBound for statically scheduled 'omp for' loops.
    Expr *NLB;
    /// Update of UpperBound for statically scheduled 'omp for' loops.
    Expr *NUB;
    /// PreviousLowerBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevLB;
    /// PreviousUpperBound - local variable passed to runtime in the
    /// enclosing schedule or null if that does not apply.
    Expr *PrevUB;
    /// DistInc - increment expression for distribute loop when found
    /// combined with a further loop level (e.g. in 'distribute parallel for')
    /// expression IV = IV + ST
    Expr *DistInc;
    /// PrevEUB - expression similar to EUB but to be used when loop
    /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
    /// when ensuring that the UB is either the calculated UB by the runtime or
    /// the end of the assigned distribute chunk)
    /// expression UB = min (UB, PrevUB)
    Expr *PrevEUB;
    /// Counters Loop counters.
    SmallVector<Expr *, 4> Counters;
    /// PrivateCounters Loop counters.
    SmallVector<Expr *, 4> PrivateCounters;
    /// Expressions for loop counters inits for CodeGen.
    SmallVector<Expr *, 4> Inits;
    /// Expressions for loop counters update for CodeGen.
    SmallVector<Expr *, 4> Updates;
    /// Final loop counter values for CodeGen.
    SmallVector<Expr *, 4> Finals;
    /// List of counters required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentCounters;
    /// List of initializers required for the generation of the non-rectangular
    /// loops.
    SmallVector<Expr *, 4> DependentInits;
    /// List of final conditions required for the generation of the
    /// non-rectangular loops.
    SmallVector<Expr *, 4> FinalsConditions;
    /// Init statement for all captured expressions.
    Stmt *PreInits;
    /// Expressions used when combining OpenMP loop pragmas
    DistCombinedHelperExprs DistCombinedFields;
    /// Check if all the expressions are built (does not check the
    /// worksharing ones).
    bool builtAll() {
      return IterationVarRef != nullptr && LastIteration != nullptr &&
             NumIterations != nullptr && PreCond != nullptr &&
             Cond != nullptr && Init != nullptr && Inc != nullptr;
    }
    /// Initialize all the fields to null.
    /// \param Size Number of elements in the
    /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
    /// arrays.
    void clear(unsigned Size) {
      IterationVarRef = nullptr;
      LastIteration = nullptr;
      CalcLastIteration = nullptr;
      PreCond = nullptr;
      Cond = nullptr;
      Init = nullptr;
      Inc = nullptr;
      IL = nullptr;
      LB = nullptr;
      UB = nullptr;
      ST = nullptr;
      EUB = nullptr;
      NLB = nullptr;
      NUB = nullptr;
      NumIterations = nullptr;
      PrevLB = nullptr;
      PrevUB = nullptr;
      DistInc = nullptr;
      PrevEUB = nullptr;
      // Resize the per-loop arrays and null out every element.
      Counters.resize(Size);
      PrivateCounters.resize(Size);
      Inits.resize(Size);
      Updates.resize(Size);
      Finals.resize(Size);
      DependentCounters.resize(Size);
      DependentInits.resize(Size);
      FinalsConditions.resize(Size);
      for (unsigned I = 0; I < Size; ++I) {
        Counters[I] = nullptr;
        PrivateCounters[I] = nullptr;
        Inits[I] = nullptr;
        Updates[I] = nullptr;
        Finals[I] = nullptr;
        DependentCounters[I] = nullptr;
        DependentInits[I] = nullptr;
        FinalsConditions[I] = nullptr;
      }
      PreInits = nullptr;
      DistCombinedFields.LB = nullptr;
      DistCombinedFields.UB = nullptr;
      DistCombinedFields.EUB = nullptr;
      DistCombinedFields.Init = nullptr;
      DistCombinedFields.Cond = nullptr;
      DistCombinedFields.NLB = nullptr;
      DistCombinedFields.NUB = nullptr;
      DistCombinedFields.DistCond = nullptr;
      DistCombinedFields.ParForInDistCond = nullptr;
    }
  };
  /// Get number of collapsed loops.
  unsigned getLoopsNumber() const { return NumAssociatedLoops; }
  /// Try to find the next loop sub-statement in the specified statement \p
  /// CurStmt.
  /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
  /// imperfectly nested loop.
  static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
                                      bool TryImperfectlyNestedLoops);
  /// Const overload of tryToFindNextInnerLoop().
  static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
                                            bool TryImperfectlyNestedLoops) {
    return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
                                  TryImperfectlyNestedLoops);
  }
  /// Calls the specified callback function for all the loops in \p CurStmt,
  /// from the outermost to the innermost.
  static bool
  doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, Stmt *)> Callback);
  /// Const overload of doForAllLoops(); adapts the const callback to the
  /// non-const implementation.
  static bool
  doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
                unsigned NumLoops,
                llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
      return Callback(Cnt, CurStmt);
    };
    return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                         NumLoops, NewCallback);
  }
  /// Calls the specified callback function for all the loop bodies in \p
  /// CurStmt, from the outermost loop to the innermost.
  static void doForAllLoopsBodies(
      Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
  /// Const overload of doForAllLoopsBodies().
  static void doForAllLoopsBodies(
      const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
      llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
    auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
      Callback(Cnt, Loop, Body);
    };
    doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
                        NumLoops, NewCallback);
  }
  static bool classof(const Stmt *T) {
    if (auto *D = dyn_cast<OMPExecutableDirective>(T))
      return isOpenMPLoopDirective(D->getDirectiveKind());
    return false;
  }
};
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  /// Offsets to the stored exprs.
  /// This enumeration contains offsets to all the pointers to children
  /// expressions stored in OMPLoopDirective.
  /// The first 8 children (offsets 0-7) are necessary for all the loop
  /// directives, the next 8 are specific to the worksharing ones, and the
  /// next 13 are used for combined constructs containing two pragmas
  /// associated to loops.
  /// After the fixed children, eight arrays of length NumAssociatedLoops are
  /// allocated: loop counters, private counters, inits, updates, finals,
  /// dependent counters, dependent inits, and finals conditions (see
  /// numLoopChildren()).
  /// PrevLowerBound and PrevUpperBound are used to communicate blocking
  /// information in composite constructs which require loop blocking
  /// DistInc is used to generate the increment expression for the distribute
  /// loop when combined with a further nested loop
  /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the
  /// for loop when combined with a previous distribute loop in the same pragma
  /// (e.g. 'distribute parallel for')
  ///
  enum {
    IterationVariableOffset = 0,
    LastIterationOffset = 1,
    CalcLastIterationOffset = 2,
    PreConditionOffset = 3,
    CondOffset = 4,
    InitOffset = 5,
    IncOffset = 6,
    PreInitsOffset = 7,
    // The '...End' enumerators do not correspond to child expressions - they
    // specify the offset to the end (and start of the following counters/
    // updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays).
    DefaultEnd = 8,
    // The following 8 exprs are used by worksharing and distribute loops only.
    IsLastIterVariableOffset = 8,
    LowerBoundVariableOffset = 9,
    UpperBoundVariableOffset = 10,
    StrideVariableOffset = 11,
    EnsureUpperBoundOffset = 12,
    NextLowerBoundOffset = 13,
    NextUpperBoundOffset = 14,
    NumIterationsOffset = 15,
    // Offset to the end for worksharing loop directives.
    WorksharingEnd = 16,
    PrevLowerBoundVariableOffset = 16,
    PrevUpperBoundVariableOffset = 17,
    DistIncOffset = 18,
    PrevEnsureUpperBoundOffset = 19,
    CombinedLowerBoundVariableOffset = 20,
    CombinedUpperBoundVariableOffset = 21,
    CombinedEnsureUpperBoundOffset = 22,
    CombinedInitOffset = 23,
    CombinedConditionOffset = 24,
    CombinedNextLowerBoundOffset = 25,
    CombinedNextUpperBoundOffset = 26,
    CombinedDistConditionOffset = 27,
    CombinedParForInDistConditionOffset = 28,
    // Offset to the end (and start of the following
    // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions
    // arrays) for combined distribute loop directives.
    CombinedDistributeEnd = 29,
  };

  // The eight getXXX() accessors below each view one of the trailing
  // per-loop arrays: the arrays are laid out back-to-back after the fixed
  // children, each getLoopsNumber() elements long.

  /// Get the counters storage.
  MutableArrayRef<Expr *> getCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind())]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the private counters storage.
  MutableArrayRef<Expr *> getPrivateCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the inits storage.
  MutableArrayRef<Expr *> getInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             2 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the updates storage.
  MutableArrayRef<Expr *> getUpdates() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             3 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the final counter updates storage.
  MutableArrayRef<Expr *> getFinals() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             4 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the dependent counters storage.
  MutableArrayRef<Expr *> getDependentCounters() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             5 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the dependent inits storage.
  MutableArrayRef<Expr *> getDependentInits() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             6 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }
  /// Get the finals conditions storage.
  MutableArrayRef<Expr *> getFinalsConditions() {
    auto **Storage = reinterpret_cast<Expr **>(
        &Data->getChildren()[getArraysOffset(getDirectiveKind()) +
                             7 * getLoopsNumber()]);
    return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
  }

protected:
  /// Build instance of loop directive of class \a Kind.
  ///
  /// \param SC Statement class.
  /// \param Kind Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
  ///
  OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
                   SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}
  /// Offset to the start of children expression arrays.
  /// Depends on the directive kind: combined distribute directives store the
  /// most fixed children, plain loop directives the fewest.
  static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
    if (isOpenMPLoopBoundSharingDirective(Kind))
      return CombinedDistributeEnd;
    if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) ||
        isOpenMPDistributeDirective(Kind))
      return WorksharingEnd;
    return DefaultEnd;
  }
  /// Children number.
  static unsigned numLoopChildren(unsigned CollapsedNum,
                                  OpenMPDirectiveKind Kind) {
    return getArraysOffset(Kind) +
           8 * CollapsedNum; // Counters, PrivateCounters, Inits,
                             // Updates, Finals, DependentCounters,
                             // DependentInits, FinalsConditions.
  }

  // Setters for the fixed child expressions. The asserted setters are only
  // valid for directive kinds matching the predicate in their assertion.

  void setIterationVariable(Expr *IV) {
    Data->getChildren()[IterationVariableOffset] = IV;
  }
  void setLastIteration(Expr *LI) {
    Data->getChildren()[LastIterationOffset] = LI;
  }
  void setCalcLastIteration(Expr *CLI) {
    Data->getChildren()[CalcLastIterationOffset] = CLI;
  }
  void setPreCond(Expr *PC) { Data->getChildren()[PreConditionOffset] = PC; }
  void setCond(Expr *Cond) { Data->getChildren()[CondOffset] = Cond; }
  void setInit(Expr *Init) { Data->getChildren()[InitOffset] = Init; }
  void setInc(Expr *Inc) { Data->getChildren()[IncOffset] = Inc; }
  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }
  void setIsLastIterVariable(Expr *IL) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[IsLastIterVariableOffset] = IL;
  }
  void setLowerBoundVariable(Expr *LB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[LowerBoundVariableOffset] = LB;
  }
  void setUpperBoundVariable(Expr *UB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[UpperBoundVariableOffset] = UB;
  }
  void setStrideVariable(Expr *ST) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[StrideVariableOffset] = ST;
  }
  void setEnsureUpperBound(Expr *EUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[EnsureUpperBoundOffset] = EUB;
  }
  void setNextLowerBound(Expr *NLB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextLowerBoundOffset] = NLB;
  }
  void setNextUpperBound(Expr *NUB) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NextUpperBoundOffset] = NUB;
  }
  void setNumIterations(Expr *NI) {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    Data->getChildren()[NumIterationsOffset] = NI;
  }
  void setPrevLowerBoundVariable(Expr *PrevLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevLowerBoundVariableOffset] = PrevLB;
  }
  void setPrevUpperBoundVariable(Expr *PrevUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevUpperBoundVariableOffset] = PrevUB;
  }
  void setDistInc(Expr *DistInc) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[DistIncOffset] = DistInc;
  }
  void setPrevEnsureUpperBound(Expr *PrevEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[PrevEnsureUpperBoundOffset] = PrevEUB;
  }
  void setCombinedLowerBoundVariable(Expr *CombLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedLowerBoundVariableOffset] = CombLB;
  }
  void setCombinedUpperBoundVariable(Expr *CombUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedUpperBoundVariableOffset] = CombUB;
  }
  void setCombinedEnsureUpperBound(Expr *CombEUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedEnsureUpperBoundOffset] = CombEUB;
  }
  void setCombinedInit(Expr *CombInit) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedInitOffset] = CombInit;
  }
  void setCombinedCond(Expr *CombCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedConditionOffset] = CombCond;
  }
  void setCombinedNextLowerBound(Expr *CombNLB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextLowerBoundOffset] = CombNLB;
  }
  void setCombinedNextUpperBound(Expr *CombNUB) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    Data->getChildren()[CombinedNextUpperBoundOffset] = CombNUB;
  }
  void setCombinedDistCond(Expr *CombDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    Data->getChildren()[CombinedDistConditionOffset] = CombDistCond;
  }
  void setCombinedParForInDistCond(Expr *CombParForInDistCond) {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    Data->getChildren()[CombinedParForInDistConditionOffset] =
        CombParForInDistCond;
  }
  // Bulk setters for the trailing per-loop arrays; defined out of line.
  void setCounters(ArrayRef<Expr *> A);
  void setPrivateCounters(ArrayRef<Expr *> A);
  void setInits(ArrayRef<Expr *> A);
  void setUpdates(ArrayRef<Expr *> A);
  void setFinals(ArrayRef<Expr *> A);
  void setDependentCounters(ArrayRef<Expr *> A);
  void setDependentInits(ArrayRef<Expr *> A);
  void setFinalsConditions(ArrayRef<Expr *> A);

public:
  // Getters mirroring the setters above; the asserted ones share the same
  // directive-kind preconditions as their corresponding setter.

  Expr *getIterationVariable() const {
    return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
  }
  Expr *getLastIteration() const {
    return cast<Expr>(Data->getChildren()[LastIterationOffset]);
  }
  Expr *getCalcLastIteration() const {
    return cast<Expr>(Data->getChildren()[CalcLastIterationOffset]);
  }
  Expr *getPreCond() const {
    return cast<Expr>(Data->getChildren()[PreConditionOffset]);
  }
  Expr *getCond() const { return cast<Expr>(Data->getChildren()[CondOffset]); }
  Expr *getInit() const { return cast<Expr>(Data->getChildren()[InitOffset]); }
  Expr *getInc() const { return cast<Expr>(Data->getChildren()[IncOffset]); }
  const Stmt *getPreInits() const {
    return Data->getChildren()[PreInitsOffset];
  }
  Stmt *getPreInits() { return Data->getChildren()[PreInitsOffset]; }
  Expr *getIsLastIterVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[IsLastIterVariableOffset]);
  }
  Expr *getLowerBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[LowerBoundVariableOffset]);
  }
  Expr *getUpperBoundVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[UpperBoundVariableOffset]);
  }
  Expr *getStrideVariable() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[StrideVariableOffset]);
  }
  Expr *getEnsureUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[EnsureUpperBoundOffset]);
  }
  Expr *getNextLowerBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NextLowerBoundOffset]);
  }
  Expr *getNextUpperBound() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NextUpperBoundOffset]);
  }
  Expr *getNumIterations() const {
    assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
            isOpenMPTaskLoopDirective(getDirectiveKind()) ||
            isOpenMPDistributeDirective(getDirectiveKind())) &&
           "expected worksharing loop directive");
    return cast<Expr>(Data->getChildren()[NumIterationsOffset]);
  }
  Expr *getPrevLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevLowerBoundVariableOffset]);
  }
  Expr *getPrevUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevUpperBoundVariableOffset]);
  }
  Expr *getDistInc() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[DistIncOffset]);
  }
  Expr *getPrevEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[PrevEnsureUpperBoundOffset]);
  }
  Expr *getCombinedLowerBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedLowerBoundVariableOffset]);
  }
  Expr *getCombinedUpperBoundVariable() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedUpperBoundVariableOffset]);
  }
  Expr *getCombinedEnsureUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedEnsureUpperBoundOffset]);
  }
  Expr *getCombinedInit() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedInitOffset]);
  }
  Expr *getCombinedCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedConditionOffset]);
  }
  Expr *getCombinedNextLowerBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextLowerBoundOffset]);
  }
  Expr *getCombinedNextUpperBound() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedNextUpperBoundOffset]);
  }
  Expr *getCombinedDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedDistConditionOffset]);
  }
  Expr *getCombinedParForInDistCond() const {
    assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) &&
           "expected loop bound distribute sharing directive");
    return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
  }
  /// Defined out of line.
  Stmt *getBody();
  const Stmt *getBody() const {
    return const_cast<OMPLoopDirective *>(this)->getBody();
  }

  // Read-only views of the trailing per-loop arrays (with const overloads).

  ArrayRef<Expr *> counters() { return getCounters(); }
  ArrayRef<Expr *> counters() const {
    return const_cast<OMPLoopDirective *>(this)->getCounters();
  }
  ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
  ArrayRef<Expr *> private_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getPrivateCounters();
  }
  ArrayRef<Expr *> inits() { return getInits(); }
  ArrayRef<Expr *> inits() const {
    return const_cast<OMPLoopDirective *>(this)->getInits();
  }
  ArrayRef<Expr *> updates() { return getUpdates(); }
  ArrayRef<Expr *> updates() const {
    return const_cast<OMPLoopDirective *>(this)->getUpdates();
  }
  ArrayRef<Expr *> finals() { return getFinals(); }
  ArrayRef<Expr *> finals() const {
    return const_cast<OMPLoopDirective *>(this)->getFinals();
  }
  ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); }
  ArrayRef<Expr *> dependent_counters() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentCounters();
  }
  ArrayRef<Expr *> dependent_inits() { return getDependentInits(); }
  ArrayRef<Expr *> dependent_inits() const {
    return const_cast<OMPLoopDirective *>(this)->getDependentInits();
  }
  ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); }
  ArrayRef<Expr *> finals_conditions() const {
    return const_cast<OMPLoopDirective *>(this)->getFinalsConditions();
  }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass ||
           T->getStmtClass() == OMPForDirectiveClass ||
           T->getStmtClass() == OMPForSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelForDirectiveClass ||
           T->getStmtClass() == OMPParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass ||
           T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForDirectiveClass ||
           T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPDistributeSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass ||
           T->getStmtClass() ==
               OMPTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForDirectiveClass ||
           T->getStmtClass() ==
               OMPTargetTeamsDistributeParallelForSimdDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass ||
           T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPSimdDirectiveClass, llvm::omp::OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current directive has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for, StartLoc,
                         EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForDirectiveClass, llvm::omp::OMPD_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  /// Stored in the child slot that follows the loop-related children
  /// (index numLoopChildren(...)).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 Expr *TaskRedRef, bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null (cast_or_null) if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPForSimdDirectiveClass, llvm::omp::OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current directive has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPSectionsDirective()
      : OMPExecutableDirective(OMPSectionsDirectiveClass,
                               llvm::omp::OMPD_sections, SourceLocation(),
                               SourceLocation()) {}
  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null (cast_or_null) if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPSectionsDirective *>(this)->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current directive has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(OMPSectionDirectiveClass,
                               llvm::omp::OMPD_section, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// Set cancel state.
  /// NOTE: unlike most sibling directives, this setter is public.
  void setHasCancel(bool Has) { HasCancel = Has; }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPSingleDirective()
      : OMPExecutableDirective(OMPSingleDirectiveClass, llvm::omp::OMPD_single,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(OMPMasterDirectiveClass, llvm::omp::OMPD_master,
                               SourceLocation(), SourceLocation()) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Name of the directive.
  DeclarationNameInfo DirName;
  /// Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, StartLoc, EndLoc),
        DirName(Name) {}
  /// Build an empty directive.
  ///
  explicit OMPCriticalDirective()
      : OMPExecutableDirective(OMPCriticalDirectiveClass,
                               llvm::omp::OMPD_critical, SourceLocation(),
                               SourceLocation()) {}
  /// Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// Return name of the directive (returned by value).
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current region has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForDirectiveClass,
                         llvm::omp::OMPD_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
  /// Sets special task reduction descriptor.
  /// Stored in the child slot that follows the loop-related children
  /// (index numLoopChildren(...)).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(getLoopsNumber(),
                                        llvm::omp::OMPD_parallel_for)] = E;
  }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null (cast_or_null) if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, StartLoc, EndLoc,
                         CollapsedNum) {}
  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master' directive.
///
/// \code
/// #pragma omp parallel master private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel master' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPParallelMasterDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  OMPParallelMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master, StartLoc,
                               EndLoc) {}
  /// Build an empty directive.
  explicit OMPParallelMasterDirective()
      : OMPExecutableDirective(OMPParallelMasterDirectiveClass,
                               llvm::omp::OMPD_parallel_master,
                               SourceLocation(), SourceLocation()) {}
  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  ///
  static OMPParallelMasterDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null (cast_or_null) if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelMasterDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterDirectiveClass;
  }
};
/// This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if current directive has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections, StartLoc,
                               EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPParallelSectionsDirective()
      : OMPExecutableDirective(OMPParallelSectionsDirectiveClass,
                               llvm::omp::OMPD_parallel_sections,
                               SourceLocation(), SourceLocation()) {}
  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
  /// Returns special task reduction reference expression.
  /// May be null (cast_or_null) if no task reduction descriptor was set.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPParallelSectionsDirective *>(this)
        ->getTaskReductionRefExpr();
  }
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors and setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if this directive has inner cancel directive.
  bool HasCancel = false;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPTaskDirective()
      : OMPExecutableDirective(OMPTaskDirectiveClass, llvm::omp::OMPD_task,
                               SourceLocation(), SourceLocation()) {}
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }
public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  ///
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);
  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);
  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
  // Grant the AST deserializer and the base class access to the private
  // constructors below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, StartLoc, EndLoc) {}
  /// Build an empty directive.
  ///
  explicit OMPTaskyieldDirective()
      : OMPExecutableDirective(OMPTaskyieldDirectiveClass,
                               llvm::omp::OMPD_taskyield, SourceLocation(),
                               SourceLocation()) {}
public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPTaskyieldDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  /// Supports LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskyieldDirectiveClass;
  }
};
/// This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
/// 'omp barrier' is a stand-alone directive: \a Create takes no clauses
/// and no associated statement.
class OMPBarrierDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPBarrierDirective()
      : OMPExecutableDirective(OMPBarrierDirectiveClass,
                               llvm::omp::OMPD_barrier, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPBarrierDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPBarrierDirectiveClass;
  }
};
/// This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
/// 'omp taskwait' is a stand-alone directive: \a Create takes no clauses
/// and no associated statement.
class OMPTaskwaitDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTaskwaitDirective()
      : OMPExecutableDirective(OMPTaskwaitDirectiveClass,
                               llvm::omp::OMPD_taskwait, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  static OMPTaskwaitDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskwaitDirectiveClass;
  }
};
/// This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
/// The task_reduction return variable is stored as the directive's first
/// child expression (Data->getChildren()[0]).
class OMPTaskgroupDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTaskgroupDirective()
      : OMPExecutableDirective(OMPTaskgroupDirectiveClass,
                               llvm::omp::OMPD_taskgroup, SourceLocation(),
                               SourceLocation()) {}

  /// Sets the task_reduction return variable (stored in child slot 0).
  void setReductionRef(Expr *RR) { Data->getChildren()[0] = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable, or null if
  /// none was set.
  // Const overload delegates to the non-const one to keep the cast logic in
  // a single place.
  const Expr *getReductionRef() const {
    return const_cast<OMPTaskgroupDirective *>(this)->getReductionRef();
  }
  Expr *getReductionRef() { return cast_or_null<Expr>(Data->getChildren()[0]); }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};
/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments - variables
/// 'a' and 'b'.
/// 'omp flush' directive does not have clauses but may have an optional list
/// of variables to flush. This list of variables is stored within a synthetic
/// OMPFlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPFlushDirective()
      : OMPExecutableDirective(OMPFlushDirectiveClass, llvm::omp::OMPD_flush,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses (only a single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};
/// This represents '#pragma omp depobj' directive.
///
/// \code
/// #pragma omp depobj(a) depend(in:x,y)
/// \endcode
/// In this example directive '#pragma omp depobj' initializes a depobj object
/// 'a' with dependence type 'in' and a list with 'x' and 'y' locators.
class OMPDepobjDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPDepobjDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPDepobjDirective()
      : OMPExecutableDirective(OMPDepobjDirectiveClass, llvm::omp::OMPD_depobj,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPDepobjDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDepobjDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDepobjDirectiveClass;
  }
};
/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPOrderedDirective()
      : OMPExecutableDirective(OMPOrderedDirectiveClass,
                               llvm::omp::OMPD_ordered, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param IsStandalone true, if the standalone directive is created.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          bool IsStandalone, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
/// The atomic sub-expressions are stored in the directive's child slots:
/// slot 0 holds 'x', slot 1 the helper update expression, slot 2 'v', and
/// slot 3 'expr' (see the setters/getters below).
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart = false;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first (postfix) form of the expression and
  /// false otherwise.
  bool IsPostfixUpdate = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPAtomicDirective()
      : OMPExecutableDirective(OMPAtomicDirectiveClass, llvm::omp::OMPD_atomic,
                               SourceLocation(), SourceLocation()) {}

  /// Set 'x' part of the associated expression/statement (child slot 0).
  void setX(Expr *X) { Data->getChildren()[0] = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 1).
  void setUpdateExpr(Expr *UE) { Data->getChildren()[1] = UE; }
  /// Set 'v' part of the associated expression/statement (child slot 2).
  void setV(Expr *V) { Data->getChildren()[2] = V; }
  /// Set 'expr' part of the associated expression/statement (child slot 3).
  void setExpr(Expr *E) { Data->getChildren()[3] = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement, or null.
  Expr *getX() { return cast_or_null<Expr>(Data->getChildren()[0]); }
  const Expr *getX() const {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)', or null.
  Expr *getUpdateExpr() { return cast_or_null<Expr>(Data->getChildren()[1]); }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[1]);
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement, or null.
  Expr *getV() { return cast_or_null<Expr>(Data->getChildren()[2]); }
  const Expr *getV() const {
    return cast_or_null<Expr>(Data->getChildren()[2]);
  }
  /// Get 'expr' part of the associated expression/statement, or null.
  Expr *getExpr() { return cast_or_null<Expr>(Data->getChildren()[3]); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(Data->getChildren()[3]);
  }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetDirective()
      : OMPExecutableDirective(OMPTargetDirectiveClass, llvm::omp::OMPD_target,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDirectiveClass;
  }
};
/// This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetDataDirective()
      : OMPExecutableDirective(OMPTargetDataDirectiveClass,
                               llvm::omp::OMPD_target_data, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
                                             EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetDataDirectiveClass;
  }
};
/// This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetEnterDataDirective()
      : OMPExecutableDirective(OMPTargetEnterDataDirectiveClass,
                               llvm::omp::OMPD_target_enter_data,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetEnterDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned N, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
  }
};
/// This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetExitDataDirective()
      : OMPExecutableDirective(OMPTargetExitDataDirectiveClass,
                               llvm::omp::OMPD_target_exit_data,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetExitDataDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param N The number of clauses.
  ///
  static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned N, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if'
/// with condition 'a'.
///
/// The special task reduction reference expression is stored as the
/// directive's first child expression (Data->getChildren()[0]).
class OMPTargetParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTargetParallelDirective()
      : OMPExecutableDirective(OMPTargetParallelDirectiveClass,
                               llvm::omp::OMPD_target_parallel,
                               SourceLocation(), SourceLocation()) {}

  /// Sets special task reduction descriptor (stored in child slot 0).
  void setTaskReductionRefExpr(Expr *E) { Data->getChildren()[0] = E; }
  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// Returns special task reduction reference expression, or null.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[0]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
/// The special task reduction reference expression is stored in the first
/// child slot following the loop-related children (see
/// setTaskReductionRefExpr/getTaskReductionRefExpr).
class OMPTargetParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if current region has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive (used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor (stored in the first child slot
  /// after the loop children).
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Returns special task reduction reference expression, or null.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPTeamsDirective()
      : OMPExecutableDirective(OMPTeamsDirectiveClass, llvm::omp::OMPD_teams,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses,
                                   Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Kind of the region this cancellation point applies to (e.g. 'for',
  /// 'sections'); OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point, StartLoc,
                               EndLoc) {}

  /// Build an empty directive (used for deserialization).
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(OMPCancellationPointDirectiveClass,
                               llvm::omp::OMPD_cancellation_point,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CancelRegion Kind of the region this cancellation point applies
  /// to.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Kind of the region being cancelled (e.g. 'for', 'sections');
  /// OMPD_unknown until set.
  OpenMPDirectiveKind CancelRegion = llvm::omp::OMPD_unknown;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               StartLoc, EndLoc) {}

  /// Build an empty directive (used for deserialization).
  ///
  explicit OMPCancelDirective()
      : OMPExecutableDirective(OMPCancelDirectiveClass, llvm::omp::OMPD_cancel,
                               SourceLocation(), SourceLocation()) {}

  /// Set cancel region for current cancel directive.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param CancelRegion Kind of the region being cancelled.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancel directive.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  /// Support for LLVM-style RTTI (isa<>/dyn_cast<>).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private setter below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopDirectiveClass, llvm::omp::OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  /// \param Has true if the construct has an inner cancel directive.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop' directive.
///
/// \code
/// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private setter below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_master_taskloop, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  /// \param Has true if the construct has an inner cancel directive.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp master taskloop simd' directive.
///
/// \code
/// #pragma omp master taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp master taskloop simd' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPMasterTaskLoopSimdDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPMasterTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                 unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_master_taskloop_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPMasterTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                                     unsigned NumClauses,
                                                     unsigned CollapsedNum,
                                                     EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop' directive.
///
/// \code
/// #pragma omp parallel master taskloop private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop' has clauses
/// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val'
/// and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private setter below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopDirective(SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Set cancel state.
  /// \param Has true if the construct has an inner cancel directive.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelMasterTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                                         unsigned NumClauses,
                                                         unsigned CollapsedNum,
                                                         EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopDirectiveClass;
  }
};
/// This represents '#pragma omp parallel master taskloop simd' directive.
///
/// \code
/// #pragma omp parallel master taskloop simd private(a,b) grainsize(val)
/// num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp parallel master taskloop simd' has
/// clauses 'private' with the variables 'a' and 'b', 'grainsize' with
/// expression 'val' and 'num_tasks' with expression 'num'.
///
class OMPParallelMasterTaskLoopSimdDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPParallelMasterTaskLoopSimdDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPParallelMasterTaskLoopSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPParallelMasterTaskLoopSimdDirectiveClass,
                         llvm::omp::OMPD_parallel_master_taskloop_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \p Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \p NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelMasterTaskLoopSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelMasterTaskLoopSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeDirectiveClass,
                         llvm::omp::OMPD_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target update' directive.
///
/// \code
/// #pragma omp target update to(a) from(b) device(1)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to' with
/// argument 'a', clause 'from' with argument 'b' and clause 'device' with
/// argument '1'.
///
class OMPTargetUpdateDirective : public OMPExecutableDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, StartLoc,
                               EndLoc) {}

  /// Build an empty directive.
  ///
  explicit OMPTargetUpdateDirective()
      : OMPExecutableDirective(OMPTargetUpdateDirectiveClass,
                               llvm::omp::OMPD_target_update, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetUpdateDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses The number of clauses.
  ///
  static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetUpdateDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for' has clause
/// 'private' with the variables 'a' and 'b'
///
class OMPDistributeParallelForDirective : public OMPLoopDirective {
  // ASTStmtReader needs access to the private setters below.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;
  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// \param E Task reduction reference expression; stored in the child slot
  /// directly after the loop-related children.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  /// \param Has true if the construct has an inner cancel directive.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
  }
  /// Returns special task reduction reference expression (const overload).
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute parallel for simd' has
/// clause 'private' with the variables 'x'
///
class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeParallelForSimdDirective *Create(
      const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
      unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
      Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeParallelForSimdDirective *CreateEmpty(
      const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp distribute simd' composite directive.
///
/// \code
/// #pragma omp distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp distribute simd' has clause
/// 'private' with the variables 'x'
///
class OMPDistributeSimdDirective final : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_distribute_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                 unsigned NumClauses,
                                                 unsigned CollapsedNum,
                                                 EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target parallel for simd' directive.
///
/// \code
/// #pragma omp target parallel for simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target parallel for simd' has clauses
/// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen'
/// with the variable 'c'.
///
class OMPTargetParallelForSimdDirective final : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetParallelForSimdDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_target_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                        unsigned NumClauses,
                                                        unsigned CollapsedNum,
                                                        EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target simd' directive.
///
/// \code
/// #pragma omp target simd private(a) map(b) safelen(c)
/// \endcode
/// In this example directive '#pragma omp target simd' has clauses 'private'
/// with the variable 'a', 'map' with the variable 'b' and 'safelen' with
/// the variable 'c'.
///
class OMPTargetSimdDirective final : public OMPLoopDirective {
  // ASTStmtReader needs access to the private constructors.
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetSimdDirectiveClass,
                         llvm::omp::OMPD_target_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum,
                                             EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute' directive.
///
/// \code
/// #pragma omp teams distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute' has clauses
/// 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, StartLoc, EndLoc,
                         CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_teams_distribute, SourceLocation(),
                         SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute simd'
/// combined directive.
///
/// \code
/// #pragma omp teams distribute simd private(a,b)
/// \endcode
/// In this example directive '#pragma omp teams distribute simd'
/// has clause 'private' with the variables 'a' and 'b'
///
class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C,
                                                      unsigned NumClauses,
                                                      unsigned CollapsedNum,
                                                      EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for simd' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for simd'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                             SourceLocation EndLoc,
                                             unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForSimdDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp teams distribute parallel for' composite
/// directive.
///
/// \code
/// #pragma omp teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp teams distribute parallel for'
/// has clause 'private' with the variables 'x'
///
class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The descriptor is stored in the child slot right after the loop-related
  /// children for this directive kind.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams' directive.
///
/// \code
/// #pragma omp target teams if(a>0)
/// \endcode
/// In this example directive '#pragma omp target teams' has clause 'if' with
/// condition 'a>0'.
///
class OMPTargetTeamsDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, StartLoc, EndLoc) {
  }

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  explicit OMPTargetTeamsDirective()
      : OMPExecutableDirective(OMPTargetTeamsDirectiveClass,
                               llvm::omp::OMPD_target_teams, SourceLocation(),
                               SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPTargetTeamsDirective *Create(const ASTContext &C,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc,
                                         ArrayRef<OMPClause *> Clauses,
                                         Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  /// The unnamed \a EmptyShell parameter tags this as the
  /// empty-construction overload.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute' combined directive.
///
/// \code
/// #pragma omp target teams distribute private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute' has clause
/// 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeDirective(SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute parallel for private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// true if the construct has inner cancel directive.
  bool HasCancel = false;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         StartLoc, EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeParallelForDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

  /// Sets special task reduction descriptor.
  /// The descriptor is stored in the child slot right after the loop-related
  /// children for this directive kind.
  void setTaskReductionRefExpr(Expr *E) {
    Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
  }

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param TaskRedRef Task reduction special reference expression to handle
  /// taskgroup descriptor.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, Expr *TaskRedRef,
         bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  /// Returns special task reduction reference expression.
  Expr *getTaskReductionRefExpr() {
    return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
        getLoopsNumber(),
        llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
  }
  const Expr *getTaskReductionRefExpr() const {
    return const_cast<OMPTargetTeamsDistributeParallelForDirective *>(this)
        ->getTaskReductionRefExpr();
  }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute parallel for simd'
/// combined directive.
///
/// \code
/// #pragma omp target teams distribute parallel for simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute parallel
/// for simd' has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeParallelForSimdDirective final
    : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc,
                                                   SourceLocation EndLoc,
                                                   unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd, StartLoc,
            EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeParallelForSimdDirective(
      unsigned CollapsedNum)
      : OMPLoopDirective(
            OMPTargetTeamsDistributeParallelForSimdDirectiveClass,
            llvm::omp::OMPD_target_teams_distribute_parallel_for_simd,
            SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeParallelForSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() ==
           OMPTargetTeamsDistributeParallelForSimdDirectiveClass;
  }
};
/// This represents '#pragma omp target teams distribute simd' combined
/// directive.
///
/// \code
/// #pragma omp target teams distribute simd private(x)
/// \endcode
/// In this example directive '#pragma omp target teams distribute simd'
/// has clause 'private' with the variables 'x'
///
class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd, StartLoc,
                         EndLoc, CollapsedNum) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum)
      : OMPLoopDirective(OMPTargetTeamsDistributeSimdDirectiveClass,
                         llvm::omp::OMPD_target_teams_distribute_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  /// \param CollapsedNum Number of collapsed nested loops.
  ///
  static OMPTargetTeamsDistributeSimdDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
              EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass;
  }
};
/// This represents the '#pragma omp tile' loop transformation directive.
class OMPTileDirective final : public OMPLoopBasedDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Offsets of child expressions/statements within Data->getChildren().
  enum {
    PreInitsOffset = 0,
    TransformedStmtOffset,
  };

  explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                            unsigned NumLoops)
      : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
                              StartLoc, EndLoc, NumLoops) {}

  void setPreInits(Stmt *PreInits) {
    Data->getChildren()[PreInitsOffset] = PreInits;
  }

  void setTransformedStmt(Stmt *S) {
    Data->getChildren()[TransformedStmtOffset] = S;
  }

public:
  /// Create a new AST node representation for '#pragma omp tile'.
  ///
  /// \param C Context of the AST.
  /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
  /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
  /// \param Clauses The directive's clauses.
  /// \param NumLoops Number of associated loops (number of items in the
  /// 'sizes' clause).
  /// \param AssociatedStmt The outermost associated loop.
  /// \param TransformedStmt The loop nest after tiling, or nullptr in
  /// dependent contexts.
  /// \param PreInits Helper preinits statements for the loop nest.
  static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  unsigned NumLoops, Stmt *AssociatedStmt,
                                  Stmt *TransformedStmt, Stmt *PreInits);

  /// Build an empty '#pragma omp tile' AST node for deserialization.
  ///
  /// \param C Context of the AST.
  /// \param NumClauses Number of clauses to allocate.
  /// \param NumLoops Number of associated loops to allocate.
  static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned NumLoops);

  /// Returns the number of loops associated with this directive (the number
  /// of items in the 'sizes' clause).
  unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }

  /// Gets/sets the associated loops after tiling.
  ///
  /// This is in de-sugared format stored as a CompoundStmt.
  ///
  /// \code
  /// for (...)
  /// ...
  /// \endcode
  ///
  /// Note that if the generated loops become associated loops of another
  /// directive, they may need to be hoisted before them.
  Stmt *getTransformedStmt() const {
    return Data->getChildren()[TransformedStmtOffset];
  }

  /// Return preinits statement.
  Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTileDirectiveClass;
  }
};
/// This represents '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive' with
/// list item 'a'.
class OMPScanDirective final : public OMPExecutableDirective {
  friend class ASTStmtReader;
  friend class OMPExecutableDirective;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPScanDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               StartLoc, EndLoc) {}

  /// Build an empty directive, to be filled in later (e.g. by the
  /// ASTStmtReader during deserialization).
  ///
  explicit OMPScanDirective()
      : OMPExecutableDirective(OMPScanDirectiveClass, llvm::omp::OMPD_scan,
                               SourceLocation(), SourceLocation()) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (a single 'inclusive' or 'exclusive'
  /// clause is expected for a scan directive).
  ///
  static OMPScanDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPScanDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPScanDirectiveClass;
  }
};
} // end namespace clang
#endif
|
Lyra2.c | /**
* Implementation of the Lyra2 Password Hashing Scheme (PHS). SSE-oriented implementation.
*
* Author: The Lyra PHC team (http://www.lyra2.net/) -- 2015.
*
* This software is hereby placed in the public domain.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <immintrin.h>
#include <omp.h>
#include "Lyra2.h"
#include "Sponge.h"
/**
 * Password Hashing Scheme (PHS) entry point mandated by the PHC interface.
 * Simply runs Lyra2 (based on the G function from Blake2b or BlaMka) with the
 * number of memory-matrix columns fixed at compile time to N_COLS; all other
 * parameters are forwarded unchanged. The combined length of salt and password
 * must be smaller than the memory matrix (nRows x nCols x b bits, where "b" is
 * the underlying sponge's bitrate). The "params" block hashed by Lyra2 is
 * kLen || pwdlen || saltlen || timeCost || nRows || nCols.
 *
 * @param out     Buffer receiving the derived key.
 * @param outlen  Desired key length, in bytes.
 * @param in      User password.
 * @param inlen   Password length, in bytes.
 * @param salt    Salt.
 * @param saltlen Salt length, in bytes.
 * @param t_cost  Time-cost parameter (T).
 * @param m_cost  Memory-cost parameter: number of rows (R) of the memory matrix.
 *
 * @return 0 if the key is generated correctly; -1 on error (usually due to
 *         lack of memory for allocation).
 */
int PHS(void *out, size_t outlen, const void *in, size_t inlen, const void *salt, size_t saltlen, unsigned int t_cost, unsigned int m_cost){
    const int rc = LYRA2(out, outlen, in, inlen, salt, saltlen, t_cost, m_cost, N_COLS);
    return rc;
}
#if (nPARALLEL == 1)
/**
* Executes Lyra2 based on the G function from Blake2b or BlaMka. This version supports salts and passwords
* whose combined length is smaller than the size of the memory matrix, (i.e., (nRows x nCols x b) bits,
* where "b" is the underlying sponge's bitrate). In this implementation, the "params" is composed by all
* integer parameters (treated as type "unsigned int") in the order they are provided, plus the value
* of nCols, (i.e., params = kLen || pwdlen || saltlen || timeCost || nRows || nCols).
*
* @param K The derived key to be output by the algorithm
* @param kLen Desired key length
* @param pwd User password
* @param pwdlen Password length
* @param salt Salt
* @param saltlen Salt length
* @param timeCost Parameter to determine the processing time (T)
 * @param nRows Number of rows of the memory matrix (R)
* @param nCols Number of columns of the memory matrix (C)
*
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
int LYRA2(void *K, unsigned int kLen, const void *pwd, unsigned int pwdlen, const void *salt, unsigned int saltlen, unsigned int timeCost, unsigned int nRows, unsigned int nCols){
    //============================= Basic variables ============================//
    int64_t gap = 1;       //Modifier to the step, assuming the values 1 or -1
    uint64_t step = 1;     //Visitation step (used during Setup to dictate the sequence in which rows are read)
    uint64_t window = 2;   //Visitation window (used to define which rows can be revisited during Setup)
    uint64_t sqrt = 2;     //Square of window (i.e., square(window)), when a window is a square number;
                           //otherwise, sqrt = 2*square(window/2)
    uint64_t row0 = 3;     //row0: sequentially written during Setup; randomly picked during Wandering
    uint64_t prev0 = 2;    //prev0: stores the previous value of row0
    uint64_t row1 = 1;     //row1: revisited during Setup, and then read [and written]; randomly picked during Wandering
    uint64_t prev1 = 0;    //prev1: stores the previous value of row1
    uint64_t i;            //auxiliary iteration counter
    //==========================================================================/

    //========== Initializing the Memory Matrix and pointers to it =============//
    //Tries to allocate enough space for the whole memory matrix
    i = (uint64_t) ((uint64_t)nRows * (uint64_t)ROW_LEN_BYTES);
    __m128i *wholeMatrix = malloc(i);
    if (wholeMatrix == NULL) {
        return -1;
    }
    //Allocates pointers to each row of the matrix
    __m128i **memMatrix = malloc(nRows * sizeof (__m128i*));
    if (memMatrix == NULL) {
        //FIX: release the matrix already allocated above instead of leaking it
        free(wholeMatrix);
        return -1;
    }
    //Places the pointers in the correct positions
    __m128i *ptrWord = wholeMatrix;
    for (i = 0; i < nRows; i++) {
        memMatrix[i] = ptrWord;
        ptrWord += ROW_LEN_INT128;
    }
    //==========================================================================/

    //============= Padding (password + salt + params) with 10*1 ===============//
    //OBS.:The memory matrix will temporarily hold the password: not for saving memory,
    //but this ensures that the password copied locally will be overwritten as soon as possible
    //First, we clean enough blocks for the password, salt, params and padding
    //Change the ''6'' if different amounts of parameters were passed
    uint64_t nBlocksInput = ((saltlen + pwdlen + 6 * sizeof (int)) / BLOCK_LEN_BLAKE2_SAFE_BYTES) + 1;
    byte *ptrByte = (byte*) wholeMatrix;
    memset(ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES);
    //Prepends the password
    memcpy(ptrByte, pwd, pwdlen);
    ptrByte += pwdlen;
    //Concatenates the salt
    memcpy(ptrByte, salt, saltlen);
    ptrByte += saltlen;
    //Concatenates the params: every integer passed as parameter, in the order they are provided by the interface
    memcpy(ptrByte, &kLen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &pwdlen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &saltlen, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &timeCost, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &nRows, sizeof (int));
    ptrByte += sizeof (int);
    memcpy(ptrByte, &nCols, sizeof (int));
    ptrByte += sizeof (int);
    //Now comes the padding
    *ptrByte = 0x80; //first byte of padding: right after the password
    ptrByte = (byte*) wholeMatrix; //resets the pointer to the start of the memory matrix
    ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
    *ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block
    //==========================================================================/

    //============== Initializing the Sponge State =============/
    //Sponge state: 8 __m128i, BLOCK_LEN_INT128 words of them for the bitrate (b) and the remainder for the capacity (c)
    __m128i *state = malloc(8 * sizeof (__m128i));
    if (state == NULL) {
        //FIX: release the buffers already allocated above instead of leaking them
        free(memMatrix);
        free(wholeMatrix);
        return -1;
    }
    initState(state);
    //==========================================================================/

    //============= Absorbing the input data with the sponge ===============//
    //Absorbing salt, password and params: this is the only place in which the block length is hard-coded to 512 bits, for compatibility with Blake2b and BlaMka
    ptrWord = wholeMatrix;
    for (i = 0; i < nBlocksInput; i++) {
        absorbBlockBlake2Safe(state, ptrWord); //absorbs each block of pad(pwd || salt || params)
        ptrWord += BLOCK_LEN_BLAKE2_SAFE_INT128; //goes to next block of pad(pwd || salt || params)
    }

    //================================================================================/
    //================================ Setup Phase ==================================//
    //==Initializes a (nRows x nCols) memory matrix, its cells having b bits each)==//
    //Initializes M[0]
    reducedSqueezeRow0(state, memMatrix[0]); //The locally copied password is most likely overwritten here
    //Initializes M[1]
    reducedDuplexRow1and2(state, memMatrix[0], memMatrix[1]);
    //Initializes M[2]
    reducedDuplexRow1and2(state, memMatrix[1], memMatrix[2]);

    //Filling Loop
    for(row0 = 3 ; row0 < nRows; row0++){
        //Performs a reduced-round duplexing operation over "M[row1][col] [+] M[prev0][col] [+] M[prev1][col]", filling M[row0] and updating M[row1]
        //M[row0][N_COLS-1-col] = M[prev0][col] XOR rand;
        //M[row1][col] = M[row1][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
        reducedDuplexRowFilling(state, memMatrix[row1], memMatrix[prev0], memMatrix[prev1], memMatrix[row0]);

        //Updates the "prev" indices: the rows more recently updated
        prev0 = row0;
        prev1 = row1;

        //updates the value of row1: deterministically picked, with a variable step
        row1 = (row1 + step) & (window - 1);

        //Checks if all rows in the window were visited.
        if (row1 == 0) {
            window *= 2;            //doubles the size of the re-visitation window
            step = sqrt + gap;      //changes the step: approximately doubles its value
            gap = -gap;             //inverts the modifier to the step
            if (gap == -1){
                sqrt *= 2;          //Doubles sqrt every other iteration
            }
        }
    }

    //============================ Wandering Phase =============================//
    //=====Iteratively overwrites pseudorandom cells of the memory matrix=======//
    //Visitation Loop
    for (i = 0 ; i < timeCost*nRows ; i++) {
        //Selects a pseudorandom indices row0 and row1
        //------------------------------------------------------------------------------------------
        /*(USE THIS IF nRows IS A POWER OF 2)*/
        //row0 = ((uint64_t)(((__uint128_t *)state)[0])) & (nRows-1);
        //row1 = ((uint64_t)(((__uint128_t *)state)[1])) & (nRows-1);
        /*(USE THIS FOR THE "GENERIC" CASE)*/
        row0 = ((uint64_t)(((__uint128_t *)state)[0])) % nRows; //row0 = lsw(rand) mod nRows
        row1 = ((uint64_t)(((__uint128_t *)state)[1])) % nRows; //row1 = lsw(rot(rand)) mod nRows

        //Performs a reduced-round duplexing operation over "M[row0][col] [+] M[row1][col] [+] M[prev0][col0] [+] M[prev1][col1]", updating both M[row0] and M[row1]
        //M[row0][col] = M[row0][col] XOR rand;
        //M[row1][col] = M[row1][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
        reducedDuplexRowWandering(state, memMatrix[row0], memMatrix[row1], memMatrix[prev0], memMatrix[prev1]);

        //update prev's: they now point to the last rows ever updated
        prev0 = row0;
        prev1 = row1;
    }

    //==========================================================================/
    //============================ Wrap-up Phase ===============================//
    //========================= Output computation =============================//
    //Absorbs one last block of the memory matrix with the full-round sponge
    absorbColumn(state, memMatrix[row0]);
    //Squeezes the key with the full-round sponge
    squeeze(state, K, kLen);
    //==========================================================================/

    //========================= Freeing the memory =============================//
    free(memMatrix);
    free(wholeMatrix);
    //Wiping out the sponge's internal state before freeing it
    memset(state, 0, 8 * sizeof (__m128i));
    free(state);
    //==========================================================================/

    return 0;
}
#endif
#if (nPARALLEL > 1)
/**
* Executes Lyra2 based on the G function from Blake2b or BlaMka. This version supports salts and passwords
* whose combined length is smaller than the size of the memory matrix, (i.e., (nRows x nCols x b) bits,
* where "b" is the underlying sponge's bitrate). In this implementation, the "params" is composed by all
* integer parameters (treated as type "unsigned int") in the order they are provided, plus the value
* of nCols, (i.e., params = kLen || pwdlen || saltlen || timeCost || nRows || nCols).
*
* @param K The derived key to be output by the algorithm
* @param kLen Desired key length
* @param pwd User password
* @param pwdlen Password length
* @param salt Salt
* @param saltlen Salt length
* @param timeCost Parameter to determine the processing time (T)
 * @param nRows Number of rows of the memory matrix (R)
* @param nCols Number of columns of the memory matrix (C)
*
* @return 0 if the key is generated correctly; -1 if there is an error (usually due to lack of memory for allocation)
*/
int LYRA2(void *K, unsigned int kLen, const void *pwd, unsigned int pwdlen, const void *salt, unsigned int saltlen, unsigned int timeCost, unsigned int nRows, unsigned int nCols){
    //============================= Basic variables ============================//
    uint64_t i,j;        //auxiliary iteration counters
    //==========================================================================/

    //========== Initializing the Memory Matrix and pointers to it =============//
    //Allocates pointers to each row of the matrix
    __m128i **memMatrix = malloc(nRows * sizeof (__m128i*));
    if (memMatrix == NULL) {
        return -1;
    }
    //Allocates pointers to each key
    unsigned char **pKeys = malloc(nPARALLEL * sizeof (unsigned char*));
    if (pKeys == NULL) {
        //FIX: release the row-pointer array already allocated above instead of leaking it
        free(memMatrix);
        return -1;
    }

#if _OPENMP <= 201107 //OpenMP 3.X or less
    #pragma omp parallel num_threads(nPARALLEL) default(none) /*private(pwd)*/ shared(memMatrix, pKeys, pwd, pwdlen, salt, saltlen, nRows, nCols, kLen, timeCost)
#endif // _OPENMP
#if _OPENMP > 201107 //OpenMP 4.0
    #pragma omp parallel proc_bind(spread) num_threads(nPARALLEL) default(none) /*private(pwd)*/ shared(memMatrix, pKeys, pwd, pwdlen, salt, saltlen, nRows, nCols, kLen, timeCost)
#endif // _OPENMP
    {
        //============================= Basic threads variables ============================//
        int64_t gap = 1;       //Modifier to the step, assuming the values 1 or -1
        uint64_t step = 1;     //Visitation step (used during Setup and Wandering phases)
        uint64_t window = 2;   //Visitation window (used to define which rows can be revisited during Setup)
        uint64_t sync = 4;     //Synchronize counter
        uint64_t sqrt = 2;     //Square of window (i.e., square(window)), when a window is a square number;
                               //otherwise, sqrt = 2*square(window/2)
        uint64_t row0 = 3;     //row0: sequentially written during Setup; randomly picked during Wandering
        uint64_t prev0 = 2;    //prev0: stores the previous value of row0
        uint64_t rowP = 1;     //rowP: revisited during Setup, and then read [and written]; randomly picked during Wandering
        uint64_t prevP = 0;    //prevP: stores the previous value of rowP
        uint64_t threadNumber = 0;
        uint64_t iP;
        uint64_t jP;           //Starts with threadNumber.
        uint64_t kP;
        uint64_t wCont;
        uint64_t sizeSlicedRows;
        uint64_t off0;
        uint64_t offP;
        //==========================================================================/

        //========================== BootStrapping Phase ==========================//
        // Size of each chunk that each thread will work with
        sizeSlicedRows = nRows/nPARALLEL;
        // Thread index:
        threadNumber = omp_get_thread_num();

        uint64_t sliceStart = threadNumber*sizeSlicedRows;
        uint64_t halfSlice = sizeSlicedRows/2;

        iP = (uint64_t) ((uint64_t) sizeSlicedRows * (uint64_t) ROW_LEN_BYTES);
        __m128i *threadSliceMatrix = malloc(iP);
        if (threadSliceMatrix == NULL) {
            printf("Error: unable to allocate memory (nRows too large?)\n");
            exit(EXIT_FAILURE);
        }
        //Places the pointers in the correct positions
        __m128i *ptrWord = threadSliceMatrix;
        for (kP = 0; kP < sizeSlicedRows; kP++) {
            memMatrix[threadNumber*sizeSlicedRows + kP] = ptrWord;
            ptrWord += ROW_LEN_INT128;
        }

        unsigned char *threadKey =  malloc(kLen);
        if (threadKey == NULL) {
            exit(EXIT_FAILURE);
        }
        //Places the pointers in the correct positions
        pKeys[threadNumber] = threadKey;

        //==========================================================================/

        //============= Padding (password + salt + params) with 10*1 ===============//
        //OBS.:The memory matrix will temporarily hold the password: not for saving memory,
        //but this ensures that the password copied locally will be overwritten as soon as possible
        //First, we clean enough blocks for the password, salt, params and padding
        //Change the ''8'' if different amounts of parameters were passed
        uint64_t nBlocksInput = ((pwdlen + saltlen + 8 * sizeof (int)) / BLOCK_LEN_BLAKE2_SAFE_BYTES) + 1;
        byte *ptrByte = (byte*) threadSliceMatrix;
        memset(ptrByte, 0, nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES);

        //Prepends the password
        memcpy(ptrByte, pwd, pwdlen);
        ptrByte += pwdlen;

        //Concatenates the salt
        memcpy(ptrByte, salt, saltlen);
        ptrByte += saltlen;

        //Concatenates the params: every integer passed as parameter, in the order they are provided by the interface
        memcpy(ptrByte, &kLen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &pwdlen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &saltlen, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &timeCost, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &nRows, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &nCols, sizeof (int));
        ptrByte += sizeof (int);
        int p = nPARALLEL;
        memcpy(ptrByte, &p, sizeof (int));
        ptrByte += sizeof (int);
        memcpy(ptrByte, &threadNumber, sizeof (int));
        ptrByte += sizeof (int);// */

        //Now comes the padding
        *ptrByte = 0x80; //first byte of padding: right after the password
        ptrByte = (byte*) threadSliceMatrix; //resets the pointer to the start of the memory matrix
        ptrByte += nBlocksInput * BLOCK_LEN_BLAKE2_SAFE_BYTES - 1; //sets the pointer to the correct position: end of incomplete block
        *ptrByte ^= 0x01; //last byte of padding: at the end of the last incomplete block
        //==========================================================================/

        //============== Initializing the Sponge State =============/
        //Sponge state: 8 __m128i, BLOCK_LEN_INT128 words of them for the bitrate (b) and the remainder for the capacity (c)
        //Thread State
        __m128i *threadState = malloc(8 * sizeof (__m128i));
        if (threadState == NULL) {
            exit(EXIT_FAILURE);
        }
        initState(threadState);
        //==========================================================================/

        //============= Absorbing the input data with the sponge ===============//
        //Absorbing salt, password and params: this is the only place in which the block length is hard-coded to 512 bits, for compatibility with Blake2b and BlaMka
        ptrWord = threadSliceMatrix;
        for (kP = 0; kP < nBlocksInput; kP++) {
            absorbBlockBlake2Safe(threadState, ptrWord); //absorbs each block of pad(pwd || salt || params)
            ptrWord += BLOCK_LEN_BLAKE2_SAFE_INT128; //goes to next block of pad(pwd || salt || params)
        }

        //================================================================================/
        //================================ Setup Phase ==================================//
        //==Initializes a (nRows x nCols) memory matrix, its cells having b bits each)==//
        //Initializes M[0]
        reducedSqueezeRow0(threadState, memMatrix[sliceStart]); //The locally copied password is most likely overwritten here
        //Initializes M[1]
        reducedDuplexRow1and2(threadState, memMatrix[sliceStart], memMatrix[sliceStart+1]);
        //Initializes M[2]
        reducedDuplexRow1and2(threadState, memMatrix[sliceStart + 1], memMatrix[sliceStart + 2]);

        jP = threadNumber;

        //Filling Loop
        for (row0 = 3; row0 < sizeSlicedRows; row0++) {
            //Performs a reduced-round duplexing operation over "Mj[rowP][col] [+] Mi[prev0][col] [+] Mj[prevP][col]", filling Mi[row0] and updating Mj[rowP]
            //Mi[row0][N_COLS-1-col] = Mi[prev0][col] XOR rand;
            //Mj[rowP][col] = Mj[rowP][col] XOR rot(rand)                    rot(): right rotation by 'omega' bits (e.g., 1 or more words)
            reducedDuplexRowFilling(threadState, memMatrix[jP*sizeSlicedRows + rowP], memMatrix[sliceStart + prev0], memMatrix[jP*sizeSlicedRows + prevP], memMatrix[sliceStart + row0]);

            //Updates the "prev" indices: the rows more recently updated
            prev0 = row0;
            prevP = rowP;

            //updates the value of rowP: deterministically picked, with a variable step
            rowP = (rowP + step) & (window - 1);

            //Checks if all rows in the window were visited.
            if (rowP == 0) {
                window *= 2;            //doubles the size of the re-visitation window
                step = sqrt + gap;      //changes the step: approximately doubles its value
                gap = -gap;             //inverts the modifier to the step
                if (gap == -1){
                    sqrt *= 2;          //Doubles sqrt every other iteration
                }
            }

            //Synchronize threads and change the slices
            if (row0 == sync) {
                sync += sqrt/2;
                jP = (jP + 1) % nPARALLEL;
                #pragma omp barrier
            }
        }

        // Needs all matrix done before starting Wandering Phase.
        #pragma omp barrier

        //============================ Wandering Phase =============================//
        //=====Iteratively overwrites pseudorandom cells of the memory matrix=======//
        window = halfSlice;
        sync = sqrt;
        off0 = 0;
        offP = window;
        uint64_t offTemp;

        //Visitation Loop
        for (wCont = 0; wCont < timeCost*sizeSlicedRows; wCont++){
            //Selects a pseudorandom indices row0 and rowP
            //------------------------------------------------------------------------------------------
            /*(USE THIS IF window IS A POWER OF 2)*/
            //row0 = off0 + (((uint64_t)(((__uint128_t *)threadState)[0])) & (window-1));
            //rowP = offP + (((uint64_t)(((__uint128_t *)threadState)[1])) & (window-1));
            /*(USE THIS FOR THE "GENERIC" CASE)*/
            row0 = off0 + (((uint64_t)(((__uint128_t *)threadState)[0])) % window); //row0 = off0 + (lsw(rand) mod window)
            rowP = offP + (((uint64_t)(((__uint128_t *)threadState)[1])) % window); //row1 = offP + (lsw(rot(rand)) mod window)

            //Selects a pseudorandom indices j0 (LSW(rot^2 (rand)) mod p)
            jP = ((uint64_t)(((__uint128_t *)threadState)[2])) % nPARALLEL; //jP = lsw(rot^2(rand)) mod nPARALLEL

            //Performs a reduced-round duplexing operation over "Mi[row0][col] [+] Mj[rowP][col] [+] Mi[prev0][col0]", updating Mi[row0]
            //Mi[row0][col] = Mi[row0][col] XOR rand;
            reducedDuplexRowWanderingParallel(threadState, memMatrix[sliceStart + row0], memMatrix[jP*sizeSlicedRows + rowP], memMatrix[sliceStart + prev0]);

            //update prev: they now point to the last rows ever updated
            prev0 = row0;

            //Synchronize threads and change the slices
            if (wCont == sync) {
                sync += sqrt;
                offTemp = off0;
                off0 = offP;
                offP = offTemp;
                #pragma omp barrier
            }
        }
        #pragma omp barrier

        //==========================================================================/
        //============================ Wrap-up Phase ===============================//
        //========================= Output computation =============================//
        //Absorbs one last block of the memory matrix with the full-round sponge
        absorbColumn(threadState, memMatrix[sliceStart + row0]);

        //Squeezes the key
        squeeze(threadState, threadKey, kLen);

        //========================= Freeing the thread memory =============================//
        free(threadSliceMatrix);
        //Wiping out the sponge's internal state before freeing it
        memset(threadState, 0, 8 * sizeof (__m128i));
        free(threadState);
    } // Parallelism End

    // XORs all Keys
    for (i = 1; i < nPARALLEL; i++) {
        for (j = 0; j < kLen; j++) {
            pKeys[0][j] ^= pKeys[i][j];
        }
    }

    // Returns in the correct variable
    memcpy(K, pKeys[0], kLen);

    //========================= Freeing the memory =============================//
    free(memMatrix);

    //Free each thread Key (FIX: wipe the key material first, mirroring the state wipe above)
    for (i = 0; i < nPARALLEL; i++) {
        memset(pKeys[i], 0, kLen);
        free(pKeys[i]);
    }
    //Free the pointers to allKeys
    free(pKeys);
    //==========================================================================/

    return 0;
}
#endif |
attribute.h | #pragma once
#include <assert.h>
#include <utility>
#include "rxmesh/handle.h"
#include "rxmesh/kernels/attribute.cuh"
#include "rxmesh/kernels/collective.cuh"
#include "rxmesh/kernels/util.cuh"
#include "rxmesh/patch_info.h"
#include "rxmesh/types.h"
#include "rxmesh/util/cuda_query.h"
#include "rxmesh/util/log.h"
#include "rxmesh/util/util.h"
#include "rxmesh/util/vector.h"
class RXMeshTest;
namespace rxmesh {
/**
* @brief Base untyped attributes used as an interface for attribute container
*/
class AttributeBase
{
    // our friend tester class
    friend class ::RXMeshTest;

   public:
    AttributeBase() = default;

    // Name of the attribute (implemented by the typed derived class)
    virtual const char* get_name() const = 0;

    // Free the attribute's memory in the given location(s); derived classes
    // own both host and device storage
    virtual void release(locationT location = LOCATION_ALL) = 0;

    virtual ~AttributeBase() = default;
};
/**
 * @brief Here we manage the attributes on top of the mesh. An attribute is
 * attached to a mesh element (e.g., vertices, edges, or faces).
* largely inspired by
* https://github.com/gunrock/gunrock/blob/master/gunrock/util/array_utils.cuh
* It is discouraged to use Attribute directly in favor of using
* add_X_attributes() from RXMeshStatic where X is vertex, edge, or face. This
* way, the user does not have to specify the number of mesh elements or
* deallocate/release the Attribute (attribute garbage collection is managed by
* RXMeshStatic)
* @tparam T type of the attribute
*/
template <class T>
class Attribute : public AttributeBase
{
template <typename S>
friend class ReduceHandle;
public:
/**
* @brief Default constructor which initializes all pointers to nullptr
*/
Attribute()
: AttributeBase(),
m_name(nullptr),
m_num_attributes(0),
m_allocated(LOCATION_NONE),
m_h_attr(nullptr),
m_h_ptr_on_device(nullptr),
m_d_attr(nullptr),
m_num_patches(0),
m_d_element_per_patch(nullptr),
m_h_element_per_patch(nullptr),
m_layout(AoS)
{
this->m_name = (char*)malloc(sizeof(char) * 1);
this->m_name[0] = '\0';
}
/**
* @brief Main constructor
* @param name attribute name
*/
Attribute(const char* name)
: AttributeBase(),
m_name(nullptr),
m_num_attributes(0),
m_allocated(LOCATION_NONE),
m_h_attr(nullptr),
m_h_ptr_on_device(nullptr),
m_d_attr(nullptr),
m_num_patches(0),
m_d_element_per_patch(nullptr),
m_h_element_per_patch(nullptr),
m_layout(AoS)
{
if (name != nullptr) {
this->m_name = (char*)malloc(sizeof(char) * (strlen(name) + 1));
strcpy(this->m_name, name);
}
}
Attribute(const Attribute& rhs) = default;
virtual ~Attribute() = default;
/**
* @brief Get the name of the attribute
*/
const char* get_name() const
{
return m_name;
}
/**
* @brief get the number of attributes per mesh element
*/
__host__ __device__ __forceinline__ uint32_t get_num_attributes() const
{
return this->m_num_attributes;
}
/**
* @brief Flag that indicates where the memory is allocated
*/
__host__ __device__ __forceinline__ locationT get_allocated() const
{
return this->m_allocated;
}
/**
* @brief Check if attribute is allocated on device
*/
__host__ __device__ __forceinline__ bool is_device_allocated() const
{
return ((m_allocated & DEVICE) == DEVICE);
}
/**
* @brief Check if attribute is allocated on host
*/
__host__ __device__ __forceinline__ bool is_host_allocated() const
{
return ((m_allocated & HOST) == HOST);
}
/**
* @brief Reset attribute to certain value
* @param value to be set
* @param location which location (device, host, or both) where attribute
* will be set
* @param stream in case of DEVICE, this is the stream that will be used to
* launch the reset kernel
*/
void reset(const T value, locationT location, cudaStream_t stream = NULL)
{
if ((location & DEVICE) == DEVICE) {
assert((m_allocated & DEVICE) == DEVICE);
const int threads = 256;
detail::template memset_attribute<T>
<<<m_num_patches, threads, 0, stream>>>(*this,
value,
m_d_element_per_patch,
m_num_patches,
m_num_attributes);
}
if ((location & HOST) == HOST) {
assert((m_allocated & HOST) == HOST);
#pragma omp parallel for
for (int p = 0; p < static_cast<int>(m_num_patches); ++p) {
for (int e = 0; e < m_h_element_per_patch[p]; ++e) {
m_h_attr[p][e] = value;
}
}
}
}
/**
* @brief Allocate memory for attribute. This is meant to be used by
* RXMeshStatic
* @param element_per_patch indicate the number of mesh element owned by
* each patch
* @param num_attributes number of attribute per mesh element
* @param location where the memory should reside (host, device, or both)
* @param layout memory layout in case num_attributes>1
*/
void init(const std::vector<uint16_t>& element_per_patch,
const uint32_t num_attributes,
locationT location = LOCATION_ALL,
const layoutT layout = AoS)
{
release();
m_num_patches = element_per_patch.size();
m_num_attributes = num_attributes;
m_layout = layout;
if (m_num_patches == 0) {
return;
}
allocate(element_per_patch.data(), location);
}
/**
* @brief Copy memory from one location to another. If target is not
* allocated, it will be allocated first before copying the memory.
* @param source the source location
* @param target the destination location
* @param stream to be used to launch the kernel
* TODO it is better to launch a kernel that do the memcpy than relying on
* the host API from CUDA since all these small memcpy will be enqueued in
* the same stream and so serialized
*/
void move(locationT source, locationT target, cudaStream_t stream = NULL)
{
if (source == target) {
RXMESH_WARN(
"Attribute::move() source ({}) and target ({}) "
"are the same.",
location_to_string(source),
location_to_string(target));
return;
}
if ((source == HOST || source == DEVICE) &&
((source & m_allocated) != source)) {
RXMESH_ERROR(
"Attribute::move() moving source is not valid"
" because it was not allocated on source i.e., {}",
location_to_string(source));
}
if (((target & HOST) == HOST || (target & DEVICE) == DEVICE) &&
((target & m_allocated) != target)) {
RXMESH_WARN(
"Attribute::move() allocating target before moving to {}",
location_to_string(target));
allocate(m_h_element_per_patch, target);
}
if (this->m_num_patches == 0) {
return;
}
if (source == HOST && target == DEVICE) {
for (uint32_t p = 0; p < m_num_patches; ++p) {
CUDA_ERROR(cudaMemcpyAsync(
m_h_ptr_on_device[p],
m_h_attr[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes,
cudaMemcpyHostToDevice,
stream));
}
} else if (source == DEVICE && target == HOST) {
for (uint32_t p = 0; p < m_num_patches; ++p) {
CUDA_ERROR(cudaMemcpyAsync(
m_h_attr[p],
m_h_ptr_on_device[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes,
cudaMemcpyDeviceToHost,
stream));
}
}
}
/**
* @brief Release allocated memory in certain location
* @param location where memory will be released
*/
void release(locationT location = LOCATION_ALL)
{
if (((location & HOST) == HOST) && ((m_allocated & HOST) == HOST)) {
for (uint32_t p = 0; p < m_num_patches; ++p) {
free(m_h_attr[p]);
}
free(m_h_attr);
m_h_attr = nullptr;
free(m_h_element_per_patch);
m_h_element_per_patch = nullptr;
m_allocated = m_allocated & (~HOST);
}
if (((location & DEVICE) == DEVICE) &&
((m_allocated & DEVICE) == DEVICE)) {
for (uint32_t p = 0; p < m_num_patches; ++p) {
GPU_FREE(m_h_ptr_on_device[p]);
}
GPU_FREE(m_d_attr);
GPU_FREE(m_d_element_per_patch);
m_allocated = m_allocated & (~DEVICE);
}
}
/**
* @brief Deep copy from a source attribute. If source_flag and dst_flag are
* both set to LOCATION_ALL, then we copy what is on host to host, and what
* on device to device. If sourc_flag is set to HOST (or DEVICE) and
* dst_flag is set to LOCATION_ALL, then we copy source's HOST (or
* DEVICE) to both HOST and DEVICE. Setting source_flag to
* LOCATION_ALL while dst_flag is NOT set to LOCATION_ALL is invalid
* because we don't know which source to copy from
* @param source attribute to copy from
* @param source_flag defines where we will copy from
* @param dst_flag defines where we will copy to
* @param stream used to launch kernel/memcpy
*/
void copy_from(Attribute<T>& source,
locationT source_flag,
locationT dst_flag,
cudaStream_t stream = NULL)
{
if (source.m_layout != m_layout) {
RXMESH_ERROR(
"Attribute::copy_from() does not support copy from "
"source of different layout!");
}
if ((source_flag & LOCATION_ALL) == LOCATION_ALL &&
(dst_flag & LOCATION_ALL) != LOCATION_ALL) {
RXMESH_ERROR("Attribute::copy_from() Invalid configuration!");
}
if (m_num_attributes != source.get_num_attributes()) {
RXMESH_ERROR(
"Attribute::copy_from() number of attributes is "
"different!");
}
if (this->is_empty() || this->m_num_patches == 0) {
return;
}
// 1) copy from HOST to HOST
if ((source_flag & HOST) == HOST && (dst_flag & HOST) == HOST) {
if ((source_flag & source.m_allocated) != source_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because it was not allocated on host");
}
if ((dst_flag & m_allocated) != dst_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because location (this) was not allocated on host");
}
for (uint32_t p = 0; p < m_num_patches; ++p) {
assert(m_h_element_per_patch[p] ==
source.m_h_element_per_patch[p]);
std::memcpy(
m_h_ptr_on_device[p],
source.m_h_ptr_on_device[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes);
}
}
// 2) copy from DEVICE to DEVICE
if ((source_flag & DEVICE) == DEVICE && (dst_flag & DEVICE) == DEVICE) {
if ((source_flag & source.m_allocated) != source_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because it was not allocated on device");
}
if ((dst_flag & m_allocated) != dst_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because location (this) was not allocated on device");
}
for (uint32_t p = 0; p < m_num_patches; ++p) {
assert(m_h_element_per_patch[p] ==
source.m_h_element_per_patch[p]);
CUDA_ERROR(cudaMemcpyAsync(
m_h_ptr_on_device[p],
source.m_h_ptr_on_device[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes,
cudaMemcpyDeviceToDevice,
stream));
}
}
// 3) copy from DEVICE to HOST
if ((source_flag & DEVICE) == DEVICE && (dst_flag & HOST) == HOST) {
if ((source_flag & source.m_allocated) != source_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because it was not allocated on host");
}
if ((dst_flag & m_allocated) != dst_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because location (this) was not allocated on device");
}
for (uint32_t p = 0; p < m_num_patches; ++p) {
assert(m_h_element_per_patch[p] ==
source.m_h_element_per_patch[p]);
CUDA_ERROR(cudaMemcpyAsync(
m_h_attr[p],
source.m_h_ptr_on_device[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes,
cudaMemcpyDeviceToHost,
stream));
}
}
// 4) copy from HOST to DEVICE
if ((source_flag & HOST) == HOST && (dst_flag & DEVICE) == DEVICE) {
if ((source_flag & source.m_allocated) != source_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because it was not allocated on device");
}
if ((dst_flag & m_allocated) != dst_flag) {
RXMESH_ERROR(
"Attribute::copy() copying source is not valid"
" because location (this) was not allocated on host");
}
for (uint32_t p = 0; p < m_num_patches; ++p) {
assert(m_h_element_per_patch[p] ==
source.m_h_element_per_patch[p]);
CUDA_ERROR(cudaMemcpyAsync(
m_h_ptr_on_device[p],
source.m_h_attr[p],
sizeof(T) * m_h_element_per_patch[p] * m_num_attributes,
cudaMemcpyHostToDevice,
stream));
}
}
}
/**
* @brief Access the attribute value using patch and local index in the
* patch. This is meant to be used by XXAttribute not directly by the user
* @param patch_id patch to be accessed
* @param local_id the local id in the patch
* @param attr the attribute id
* @return const reference to the attribute
*/
__host__ __device__ __forceinline__ T& operator()(const uint32_t patch_id,
const uint16_t local_id,
const uint32_t attr) const
{
assert(patch_id < m_num_patches);
assert(attr < m_num_attributes);
const uint32_t pitch_x = (m_layout == AoS) ? m_num_attributes : 1;
#ifdef __CUDA_ARCH__
const uint32_t pitch_y =
(m_layout == AoS) ? 1 : m_d_element_per_patch[patch_id];
return m_d_attr[patch_id][local_id * pitch_x + attr * pitch_y];
#else
const uint32_t pitch_y =
(m_layout == AoS) ? 1 : m_h_element_per_patch[patch_id];
return m_h_attr[patch_id][local_id * pitch_x + attr * pitch_y];
#endif
}
/**
* @brief Access the attribute value using patch and local index in the
* patch. This is meant to be used by XXAttribute not directly by the user
* @param patch_id patch to be accessed
* @param local_id the local id in the patch
* @param attr the attribute id
* @return non-const reference to the attribute
*/
__host__ __device__ __forceinline__ T& operator()(const uint32_t patch_id,
const uint16_t local_id,
const uint32_t attr)
{
assert(patch_id < m_num_patches);
assert(attr < m_num_attributes);
const uint32_t pitch_x = (m_layout == AoS) ? m_num_attributes : 1;
#ifdef __CUDA_ARCH__
const uint32_t pitch_y =
(m_layout == AoS) ? 1 : m_d_element_per_patch[patch_id];
return m_d_attr[patch_id][local_id * pitch_x + attr * pitch_y];
#else
const uint32_t pitch_y =
(m_layout == AoS) ? 1 : m_h_element_per_patch[patch_id];
return m_h_attr[patch_id][local_id * pitch_x + attr * pitch_y];
#endif
}
/**
* @brief Check if the attribute is empty
*/
__host__ __device__ __forceinline__ bool is_empty() const
{
return m_num_patches == 0;
}
private:
/**
* @brief allocate internal memory
*/
void allocate(const uint16_t* element_per_patch, locationT location)
{
if (m_num_patches != 0) {
if ((location & HOST) == HOST) {
release(HOST);
m_h_element_per_patch = static_cast<uint16_t*>(
malloc(sizeof(uint16_t) * m_num_patches));
m_h_attr = static_cast<T**>(malloc(sizeof(T*) * m_num_patches));
std::memcpy(m_h_element_per_patch,
element_per_patch,
sizeof(uint16_t) * m_num_patches);
for (uint32_t p = 0; p < m_num_patches; ++p) {
m_h_attr[p] = static_cast<T*>(malloc(
sizeof(T) * element_per_patch[p] * m_num_attributes));
}
m_allocated = m_allocated | HOST;
}
if ((location & DEVICE) == DEVICE) {
release(DEVICE);
m_h_element_per_patch = static_cast<uint16_t*>(
malloc(sizeof(uint16_t) * m_num_patches));
std::memcpy(m_h_element_per_patch,
element_per_patch,
sizeof(uint16_t) * m_num_patches);
CUDA_ERROR(cudaMalloc((void**)&(m_d_element_per_patch),
sizeof(uint16_t) * m_num_patches));
CUDA_ERROR(cudaMalloc((void**)&(m_d_attr),
sizeof(T*) * m_num_patches));
m_h_ptr_on_device =
static_cast<T**>(malloc(sizeof(T*) * m_num_patches));
CUDA_ERROR(cudaMemcpy(m_d_element_per_patch,
element_per_patch,
sizeof(uint16_t) * m_num_patches,
cudaMemcpyHostToDevice));
for (uint32_t p = 0; p < m_num_patches; ++p) {
CUDA_ERROR(cudaMalloc((void**)&(m_h_ptr_on_device[p]),
sizeof(T) * m_h_element_per_patch[p] *
m_num_attributes));
}
CUDA_ERROR(cudaMemcpy(m_d_attr,
m_h_ptr_on_device,
sizeof(T*) * m_num_patches,
cudaMemcpyHostToDevice));
m_allocated = m_allocated | DEVICE;
}
}
}
    // attribute name (heap-allocated C string owned by this object)
    char* m_name;
    // number of attribute values stored per mesh element
    uint32_t m_num_attributes;
    // bitmask of the locations (HOST/DEVICE) currently allocated
    locationT m_allocated;
    // host per-patch buffers: m_h_attr[p] points to patch p's values
    T** m_h_attr;
    // host-side copy of the per-patch device pointers (used to fill m_d_attr)
    T** m_h_ptr_on_device;
    // device-resident array of per-patch device buffers
    T** m_d_attr;
    // number of patches managed
    uint32_t m_num_patches;
    // per-patch element counts, device copy
    uint16_t* m_d_element_per_patch;
    // per-patch element counts, host copy
    uint16_t* m_h_element_per_patch;
    // AoS vs SoA memory layout (see operator())
    layoutT m_layout;
    // presumably the CUDA block size used by this class's kernels — the
    // kernels are not visible in this chunk; confirm before relying on it
    constexpr static uint32_t m_block_size = 256;
};
/**
 * @brief Attribute data attached to mesh faces
 * @tparam T the attribute type
 */
template <class T>
class FaceAttribute : public Attribute<T>
{
   public:
    /**
     * @brief Default constructor
     */
    FaceAttribute() = default;
    /**
     * @brief Main constructor, intended for RXMeshStatic rather than direct
     * use by the user
     * @param name of the attribute
     * @param face_per_patch number of faces owned per patch
     * @param num_attributes number of attribute values per face
     * @param location where the attribute is to be allocated
     * @param layout memory layout used when num_attributes > 1
     */
    FaceAttribute(const char*                  name,
                  const std::vector<uint16_t>& face_per_patch,
                  const uint32_t               num_attributes,
                  locationT                    location,
                  const layoutT                layout)
        : Attribute<T>(name)
    {
        this->init(face_per_patch, num_attributes, location, layout);
    }
    /**
     * @brief Access a face's attribute via its FaceHandle
     * @param f_handle input face handle
     * @param attr the attribute id
     * @return reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(
        const FaceHandle f_handle,
        const uint32_t   attr = 0) const
    {
        const auto patch_local = f_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
    /**
     * @brief Access a face's attribute via its FaceHandle
     * @param f_handle input face handle
     * @param attr the attribute id
     * @return non-const reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(const FaceHandle f_handle,
                                                      const uint32_t attr = 0)
    {
        const auto patch_local = f_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
};
/**
 * @brief Attribute data attached to mesh edges
 * @tparam T the attribute type
 */
template <class T>
class EdgeAttribute : public Attribute<T>
{
   public:
    /**
     * @brief Default constructor
     */
    EdgeAttribute() = default;
    /**
     * @brief Main constructor, intended for RXMeshStatic rather than direct
     * use by the user
     * @param name of the attribute
     * @param edge_per_patch number of edges owned per patch
     * @param num_attributes number of attribute values per edge
     * @param location where the attribute is to be allocated
     * @param layout memory layout used when num_attributes > 1
     */
    EdgeAttribute(const char*                  name,
                  const std::vector<uint16_t>& edge_per_patch,
                  const uint32_t               num_attributes,
                  locationT                    location,
                  const layoutT                layout)
        : Attribute<T>(name)
    {
        this->init(edge_per_patch, num_attributes, location, layout);
    }
    /**
     * @brief Access an edge's attribute via its EdgeHandle
     * @param e_handle input edge handle
     * @param attr the attribute id
     * @return reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(
        const EdgeHandle e_handle,
        const uint32_t   attr = 0) const
    {
        const auto patch_local = e_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
    /**
     * @brief Access an edge's attribute via its EdgeHandle
     * @param e_handle input edge handle
     * @param attr the attribute id
     * @return non-const reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(const EdgeHandle e_handle,
                                                      const uint32_t attr = 0)
    {
        const auto patch_local = e_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
};
/**
 * @brief Attribute data attached to mesh vertices
 * @tparam T the attribute type
 */
template <class T>
class VertexAttribute : public Attribute<T>
{
   public:
    /**
     * @brief Default constructor
     */
    VertexAttribute() = default;
    /**
     * @brief Main constructor, intended for RXMeshStatic rather than direct
     * use by the user
     * @param name of the attribute
     * @param vertex_per_patch number of vertices owned per patch
     * @param num_attributes number of attribute values per vertex
     * @param location where the attribute is to be allocated
     * @param layout memory layout used when num_attributes > 1
     */
    VertexAttribute(const char*                  name,
                    const std::vector<uint16_t>& vertex_per_patch,
                    const uint32_t               num_attributes,
                    locationT                    location,
                    const layoutT                layout)
        : Attribute<T>(name)
    {
        this->init(vertex_per_patch, num_attributes, location, layout);
    }
    /**
     * @brief Access a vertex's attribute via its VertexHandle
     * @param v_handle input vertex handle
     * @param attr the attribute id
     * @return reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(
        const VertexHandle v_handle,
        const uint32_t     attr = 0) const
    {
        const auto patch_local = v_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
    /**
     * @brief Access a vertex's attribute via its VertexHandle
     * @param v_handle input vertex handle
     * @param attr the attribute id
     * @return non-const reference to the attribute
     */
    __host__ __device__ __forceinline__ T& operator()(
        const VertexHandle v_handle,
        const uint32_t     attr = 0)
    {
        const auto patch_local = v_handle.unpack();
        return Attribute<T>::operator()(
            patch_local.first, patch_local.second, attr);
    }
};
/**
 * @brief Attribute container used to manage a collection of attributes by
 * RXMeshStatic
 */
class AttributeContainer
{
   public:
    /**
     * @brief Default constructor
     */
    AttributeContainer() = default;
    /**
     * @brief Destructor which releases all attributes managed by this
     * container
     */
    virtual ~AttributeContainer()
    {
        while (!m_attr_container.empty()) {
            m_attr_container.back()->release();
            m_attr_container.pop_back();
        }
    }
    /**
     * @brief Number of attributes managed by this container
     */
    size_t size() const  // const-correctness fix: accessor does not mutate
    {
        return m_attr_container.size();
    }
    /**
     * @brief get a list of names of the attributes managed by this container
     * @return vector of the attribute names
     */
    std::vector<std::string> get_attribute_names() const
    {
        std::vector<std::string> names;
        names.reserve(m_attr_container.size());  // avoid repeated reallocation
        for (size_t i = 0; i < m_attr_container.size(); ++i) {
            names.push_back(m_attr_container[i]->get_name());
        }
        return names;
    }
    /**
     * @brief add a new attribute to be managed by this container
     * @tparam AttrT attribute type
     * @param name unique name given to the attribute
     * @param element_per_patch number of mesh elements owned by each patch
     * @param num_attributes number of attributes per mesh element
     * @param location where the attributes will be allocated
     * @param layout memory layout in case of num_attributes > 1
     * @return a shared pointer to the attribute
     */
    template <typename AttrT>
    std::shared_ptr<AttrT> add(const char* name,
                               std::vector<uint16_t>& element_per_patch,
                               uint32_t num_attributes,
                               locationT location,
                               layoutT layout)
    {
        // duplicate names are allowed but flagged, matching previous behavior
        if (does_exist(name)) {
            RXMESH_WARN(
                "AttributeContainer::add() adding an attribute with "
                "name {} already exists!",
                std::string(name));
        }
        auto new_attr = std::make_shared<AttrT>(
            name, element_per_patch, num_attributes, location, layout);
        m_attr_container.push_back(
            std::dynamic_pointer_cast<AttributeBase>(new_attr));
        return new_attr;
    }
    /**
     * @brief Check if an attribute exists
     * @param name of the attribute
     */
    bool does_exist(const char* name) const  // const-correctness fix
    {
        for (size_t i = 0; i < m_attr_container.size(); ++i) {
            if (!strcmp(m_attr_container[i]->get_name(), name)) {
                return true;
            }
        }
        return false;
    }
    /**
     * @brief remove an attribute (first match by name) and release its memory
     * @param name of the attribute
     */
    void remove(const char* name)
    {
        for (auto it = m_attr_container.begin(); it != m_attr_container.end();
             ++it) {
            if (!strcmp((*it)->get_name(), name)) {
                (*it)->release(LOCATION_ALL);
                m_attr_container.erase(it);
                // iterator invalidated by erase — stop immediately
                break;
            }
        }
    }
   private:
    // owning handles to every managed attribute
    std::vector<std::shared_ptr<AttributeBase>> m_attr_container;
};
} // namespace rxmesh |
main.c | #include <stdio.h>
#include <omp.h>
// Demonstrates an OpenMP worksharing loop split between two threads. Note the
// `ordered` clause is declared but no `#pragma omp ordered` region is used
// inside the loop, so the interleaving of the printfs is not enforced.
int main()
{
    int i;
    #pragma omp parallel num_threads(2) // set the number of threads to 2
    {
        int tid = omp_get_thread_num(); // read this thread's identifier
        // `i` is implicitly privatized by the `omp for` construct
        #pragma omp for ordered schedule(static)
        for(i = 1; i <= 3; i++)
        {
            printf("[PRINT1] T%d = %d \n",tid,i);
            printf("[PRINT2] T%d = %d \n",tid,i);
        }
    }
}
|
CPUMatrixImpl.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" trigger this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // unreachable code; triggered for unknown reasons
#pragma warning(disable : 4702) // conversion from 'double' to 'float'
#ifdef USE_MKL
// requires MKL 10.0 and above
#include <mkl.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
// NOTE(review): XOR swap — valid only for integral lvalues, and if a and b
// alias the same object it zeroes the value instead of swapping. Prefer
// std::swap in new code.
#define SWAP(a, b) \
    {              \
        (a) ^= (b); \
        (b) ^= (a); \
        (a) ^= (b); \
    }
// column-major linear index of element (row i, col j) with leading dimension ld
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
// Storage order of a 2-D array; the values mirror the CBLAS order constants.
enum class MatrixOrder
{
    RowMajor = 101, // row-major arrays
    ColMajor = 102  // column-major arrays
};
// Transposition request, encoded as the character passed to BLAS routines.
enum class MatrixTranspose : char
{
    NoTrans = 'N',   // trans='N'
    Trans = 'T',     // trans='T'
    ConjTrans = 'C'  // trans='C'
};
// How a symmetric matrix's values are stored.
enum class SymMatrixType : char
{
    Up = 'U',          // symmetric matrix is stored in the upper part
    Low = 'L',         // symmetric matrix is stored in the lower part
    Full = 'F',        // full populated
    NotSymmetric = 'N' // not a symmetric matrix
};
// Which side of a product a matrix multiplies on.
enum class MatrixOpSide : char
{
    Left = 'L',  // left multiply
    Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
// Default constructor: creates an empty 0x0 matrix with no buffer.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
    ZeroInit();
}
// Allocate a zero-initialized array of ElemType, padded to an even element
// count. Use this instead of new[] so debug builds can NaN-fill (see below).
//
// Why the padding: a random number generator may later fill this buffer. The
// RNG is oblivious to whether the buffer is on the CPU or GPU, but it must
// keep an accurate tally of how many numbers it has generated, and the GPU
// can only generate gaussians in pairs. Rounding the allocation up to a
// multiple of two keeps that bookkeeping consistent in the worst case.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
    const size_t paddedCount = AsMultipleOf(n, 2);
    ElemType* buffer = new ElemType[paddedCount]();
#if 0 // _DEBUG
    ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
    for (size_t i = 0; i < n; i++)
        buffer[i] = nan;
#endif
    return buffer;
}
// Construct a numRows x numCols matrix. The buffer is allocated (and
// zero-initialized via NewArray) only when the matrix is non-empty.
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
    ZeroInit();
    m_numRows = numRows;
    m_numCols = numCols;
    const size_t numElements = GetNumElements();
    SetSizeAllocated(numElements);
    if (numElements != 0)
        SetBuffer(NewArray<ElemType>(numElements), numElements * sizeof(ElemType));
}
// Construct from an existing array; depending on matrixFlags the data is
// either deep-copied or the buffer is adopted (see SetValue overload below).
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
    ZeroInit();
    SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
    ZeroInit();
    SetValue(deepCopyFrom);
}
//assignment operator, deep copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
    // self-assignment is safe: SetValue returns early when the source is *this
    SetValue(deepCopyFrom);
    return *this;
}
//move constructor, shallow copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
    : Base(/* shallow */ true)
{
    ShallowCopyFrom(moveFrom);
    // zero the source so its destructor does not release the adopted buffer
    moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
    : Base(shallow)
{
    ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
    if (this != &moveFrom)
    {
        ShallowCopyFrom(moveFrom);
        // release the pointer from the source object so that the destructor won't release it twice
        moveFrom.ZeroValues();
    }
    return *this;
}
// Reset this matrix to the empty (0x0) state.
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
    ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
// Return a shallow (non-owning) view of columns
// [startColumn, startColumn + numCols). No data is copied; the slice shares
// the buffer and records a column offset into it.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
    if (startColumn + numCols > m_numCols)
        InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);
    CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
    slice.m_numCols = numCols;
    slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
    return slice;
}
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
// Note: this turns *this into a shallow view into fromMatrix (no copy).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
    if (startColumn + numCols > fromMatrix.m_numCols)
        InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);
    Clear();
    ShallowCopyFrom(fromMatrix);
    m_numCols = numCols;
    // m_numRows was set by ShallowCopyFrom and equals fromMatrix's row count
    m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
    return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
// Deep copy of numCols whole columns into an existing region of *this.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
    if (startColumn + numCols > m_numCols)
        LogicError("The slice is out of range of the destination matrix.");
    if (numCols > fromMatrix.GetNumCols())
        InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices do not match");
    // column-major layout makes the slice one contiguous span
    memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
    return *this;
}
// Copy numCols columns from fromMatrix into *this, reading every
// srcNumColsStride-th source column and writing every destNumColsStride-th
// destination column.
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
    if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
        LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
    // BUGFIX: this message previously said "srcNumColsStride" although the
    // check is on the destination stride, which misdirected debugging.
    if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
        LogicError("The numCols to copy and destNumColsStride specified is out of range of the destination matrix.");
    if (m_numRows != fromMatrix.m_numRows)
        LogicError("The number of rows in source and destination matrices do not match");
    long n = (long) numCols, m = (long) m_numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (size_t i = 0; i < (m & ~3); i += 4)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
            us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
            us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
            us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
        }
        // handle remaining
        for (size_t i = m & ~3; i < m; i++)
        {
            us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
        }
    }
}
// For each column of a, assign all of a's rows into *this starting at row
// startIndex (the counterpart of AddToRowSliceValuesOf, but assigning).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    // BUGFIX: the messages below previously named the wrong function
    // ("AddToRowSliceValuesOf"), which made failures here hard to trace.
    if (a.GetNumRows() != numRows)
        LogicError("AssignToRowSliceValuesOf: a.GetNumRows() != numRows.");
    if (startIndex + numRows > GetNumRows())
        LogicError("AssignToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
    if (a.GetNumCols() != GetNumCols())
        LogicError("AssignToRowSliceValuesOf: columns does not match.");
    long n = (long) a.GetNumCols(), m = (long) numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
        {
            us(startRow, j) = a(i, j);
            us(startRow + 1, j) = a(i + 1, j);
            us(startRow + 2, j) = a(i + 2, j);
            us(startRow + 3, j) = a(i + 3, j);
        }
        // handle remaining stuffs
        for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
        {
            us(startRow, j) = a(i, j);
        }
    }
    return *this;
}
//for each column of a, we assign numRows starting from startIndex to this
// (this is resized to numRows x a.GetNumCols(); each column is one memcpy)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    if (startIndex + numRows > a.GetNumRows())
        LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
    RequireSize(numRows, a.GetNumCols());
    long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
    long k = (long) a.GetNumRows();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // memory copy might be faster?
        // column-major: the slice of column j is contiguous at offset startIndex
        memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
        // //four-way unrolling
        // for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
        // {
        //     us(i,j) = a(startRow,j);
        //     us(i+1,j) = a(startRow+1,j);
        //     us(i+2,j) = a(startRow+2,j);
        //     us(i+3,j) = a(startRow+3,j);
        // }
        // //handle remaining stuffs
        // for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
        // {
        //     us(i,j) = a(startRow,j);
        // }
    }
    return *this;
}
//for the row slice of this starting from startIndex we add a to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    if (a.IsEmpty())
        LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
    if (a.GetNumRows() != numRows)
        LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
    if (startIndex + numRows > GetNumRows())
        LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
    if (a.GetNumCols() != GetNumCols())
        LogicError("AddToRowSliceValuesOf: columns does not match.");
    long n = (long) a.GetNumCols(), m = (long) numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
        {
            us(startRow, j) += a(i, j);
            us(startRow + 1, j) += a(i + 1, j);
            us(startRow + 2, j) += a(i + 2, j);
            us(startRow + 3, j) += a(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
        {
            us(startRow, j) += a(i, j);
        }
    }
    return *this;
}
//for each column of this, we add row slice of a starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
    if (a.IsEmpty())
        LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
    if (GetNumRows() != numRows)
        LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
    if (startIndex + numRows > a.GetNumRows())
        LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
    if (a.GetNumCols() != GetNumCols())
        LogicError("AddWithRowSliceValuesOf: columns does not match.");
    long n = (long) a.GetNumCols(), m = (long) numRows;
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
        {
            us(i, j) += a(startRow, j);
            us(i + 1, j) += a(startRow + 1, j);
            us(i + 2, j) += a(startRow + 2, j);
            us(i + 3, j) += a(startRow + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
        {
            us(i, j) += a(startRow, j);
        }
    }
    return *this;
}
// Return the main diagonal as a 1 x n row vector; only valid for square
// matrices.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
    if (m_numRows != m_numCols)
        LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);
    CPUMatrix<ElemType> diag(1, m_numCols);
    auto& us = *this;
    // cast once, as the rest of this file does: OpenMP loop indices must be
    // signed, and this also avoids a signed/unsigned comparison in the bound
    long m = (long) m_numRows;
#pragma omp parallel for
    for (long i = 0; i < m; i++)
    {
        diag(0, (size_t) i) = us(i, i);
    }
    return diag;
}
// Subtract 1.0 from the single element at linear position `position` of c.
// Reports an error (RuntimeError throws) when the position is out of range.
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
    if (position >= c.GetNumElements())
        RuntimeError("MinusOneAt: position is out of CPU matrix size");
    c.Data()[position] -= 1.0;
}
// Tile matrix a numRowRepeats times vertically and numColRepeats times
// horizontally into *this (which is resized accordingly).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
    if (this == &a)
        LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");
    if (a.IsEmpty())
        LogicError("AssignRepeatOf: Matrix a is empty.");
    RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);
    long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
    auto& us = *this;
#pragma omp parallel for
    for (long q = 0; q < numColRepeats; q++) // outer loop over column tiles
    {
        for (long p = 0; p < numRowRepeats; p++) // row tiles
        {
            long colOffset = q * n;
            for (long j = 0; j < n; j++, colOffset++)
            {
                long rowOffset = p * m;
                // four-way unrolling
                for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
                {
                    us(rowOffset, colOffset) = a(i, j);
                    us(rowOffset + 1, colOffset) = a(i + 1, j);
                    us(rowOffset + 2, colOffset) = a(i + 2, j);
                    us(rowOffset + 3, colOffset) = a(i + 3, j);
                }
                // handle remaining stuffs
                for (long i = m & ~3; i < m; i++, rowOffset++)
                {
                    us(rowOffset, colOffset) = a(i, j);
                }
            }
        }
    }
    return *this;
}
// Fold a (whose row count is GetNumRows() * numRepeats) back onto *this by
// summing its numRepeats vertical blocks into each row of *this.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
    if (a.IsEmpty())
        LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");
    if (a.GetNumRows() != GetNumRows() * numRepeats)
        LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");
    long n = (long) a.GetNumCols(), m = (long) GetNumRows();
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            for (long k = 0; k < numRepeats; k++)
            {
                us(i, j) += a(k * m + i, j);
                us(i + 1, j) += a(k * m + i + 1, j);
                us(i + 2, j) += a(k * m + i + 2, j);
                us(i + 3, j) += a(k * m + i + 3, j);
            }
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            for (long k = 0; k < numRepeats; k++)
            {
                us(i, j) += a(k * m + i, j);
            }
        }
    }
    return *this;
}
// Not implemented on CPU; the bare parameter expressions below only silence
// unused-parameter warnings before NOT_IMPLEMENTED raises.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
    a;
    posNumber;
    negNumber;
    shiftNumber;
    NOT_IMPLEMENTED;
}
// Not implemented on CPU; parameters are referenced only to silence
// unused-parameter warnings.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
    a;
    posNumber;
    negNumber;
    shiftNumber;
    NOT_IMPLEMENTED;
}
// Return a newly allocated matrix holding the transpose of *this.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
    if (IsEmpty())
        LogicError("Transpose: Matrix is empty.");
    CPUMatrix<ElemType> transposed;
    transposed.AssignTransposeOf(*this);
    return transposed;
}
// *this = a^T. Resizes *this to a.GetNumCols() x a.GetNumRows(); in-place
// transposition is explicitly rejected.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
    if (this == &a)
        LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");
    if (a.IsEmpty())
        LogicError("AssignTransposeOf: Matrix a is empty.");
    RequireSize(a.GetNumCols(), a.GetNumRows());
    long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
    auto& us = *this;
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            us(j, i) = a(i, j);
            us(j, i + 1) = a(i + 1, j);
            us(j, i + 2) = a(i + 2, j);
            us(j, i + 3) = a(i + 3, j);
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            us(j, i) = a(i, j);
        }
    }
    return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
// BUGFIX: previously, beta == 0 combined with alpha != 1 fell into the
// general branch and DID read dst (0 * NaN = NaN), violating the contract
// above. beta == 0 is now handled first and never reads dst.
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
    if (beta == 0) // plain assignment/scaled assignment: dst must not be read
    {
        if (alpha == 1)
            memcpy(dst, src, sizeof(ElemType) * numRows);
        else
            for (size_t i = 0; i < numRows; i++)
                dst[i] = alpha * src[i];
    }
    else if (alpha != 1) // rare case: just do the full thing
        for (size_t i = 0; i < numRows; i++)
            dst[i] = beta * dst[i] + alpha * src[i];
    else if (beta == 1) // used in backprop
        for (size_t i = 0; i < numRows; i++)
            dst[i] += src[i];
    else // alpha=1, arbitrary beta: also rare case
        for (size_t i = 0; i < numRows; i++)
            dst[i] = beta * dst[i] + src[i];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
// Gather: for each output column j, pull source column idx(0, j). A NaN or
// negative index denotes a gap and leaves that output column untouched.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
    if (idx.GetNumRows() != 1) // index is 1-dimensional only
        InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
    // with beta != 0 existing content is blended in, so the size must already
    // be right; with beta == 0 we can freely resize
    if (beta)
        VerifySize(a.GetNumRows(), idx.GetNumCols());
    else
        Resize(a.GetNumRows(), idx.GetNumCols());
    auto& us = *this;
    // race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending in circumstance, it may be more efficient to parallelize over rows.
    foreach_column(jOut, us)
    {
        auto jInF = idx(0, jOut); // this is the column we need to get
        if (std::isnan(jInF) || jInF < 0) // negative index means gap
            continue;
        size_t jIn = (size_t)jInF;
        if (jIn >= a.GetNumCols())
            InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
        ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
    }
    return *this;
}
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
// Scatter: several source columns may target the same output column, so the
// whole target is pre-scaled by beta once, then contributions accumulate.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
    if (idx.GetNumRows() != 1) // index is 1-dimensional only
        InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
    if (idx.GetNumCols() != a.GetNumCols())
        InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
    if (a.GetNumRows() != GetNumRows())
        InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
    auto& us = *this;
    // pre-scale with beta upfront
    // Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
    Scale(beta, us); // if beta is 0, then this will be a memset()
    ScatterValues(idx.Data(), a.Data(), us.Data(), alpha, idx.GetNumCols(), a.GetNumRows(), GetNumCols(), idx.GetNumRows());
    return *this;
}
// Fill the whole matrix with the scalar v. Zero takes the memset fast path;
// any other (or non-finite) value is written with a lightly-parallel loop.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
    if (IsEmpty())
        LogicError("SetValue: Matrix is empty.");
    bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
    if (isFinite && v == 0)
    {
        // all-bits-zero equals the value 0 for the supported element types
        memset(Data(), 0, sizeof(ElemType) * GetNumElements());
    }
    else
    {
        ElemType* bufPtr = Data();
        long m = (long) GetNumElements();
        // 2-way thread parallelism is sufficient for the memory bound
        // operation of just setting the values of an array.
        const unsigned SETVALUE_NUM_THREADS = 2;
        UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
        // four-way unrolling
        for (long i = 0; i < (m & ~3); i += 4)
        {
            bufPtr[i] = v;
            bufPtr[i + 1] = v;
            bufPtr[i + 2] = v;
            bufPtr[i + 3] = v;
        }
        // handle remaining stuffs
        for (long i = m & ~3; i < m; i++)
        {
            bufPtr[i] = v;
        }
    }
}
// Overwrite with val every group of numColsPerMaskEntry columns whose mask
// entry is not 1 (mask value 1 means "keep the columns").
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
    if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
        RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
    auto& us = *this;
    long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        if (columnsMask(0, j) == 1)
            continue;
        for (long k = 0; k < numColsPerMaskEntry; ++k)
        {
            // four-way unrolling
            for (size_t i = 0; i < (m & ~3); i += 4)
            {
                us(i, (j * numColsPerMaskEntry) + k) = val;
                us(i + 1, (j * numColsPerMaskEntry) + k) = val;
                us(i + 2, (j * numColsPerMaskEntry) + k) = val;
                us(i + 3, (j * numColsPerMaskEntry) + k) = val;
            }
            // handle remaining
            for (size_t i = m & ~3; i < m; i++)
            {
                us(i, (j * numColsPerMaskEntry) + k) = val;
            }
        }
    }
}
// Copy GetNumRows() values from colPointer into column j; a null pointer is
// silently a no-op.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (colPointer == NULL)
        return;
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = colPointer[i];
        us(i + 1, j) = colPointer[i + 1];
        us(i + 2, j) = colPointer[i + 2];
        us(i + 3, j) = colPointer[i + 3];
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = colPointer[i];
    }
}
// Fill every element of column j with the scalar val.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = val;
        us(i + 1, j) = val;
        us(i + 2, j) = val;
        us(i + 3, j) = val;
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = val;
    }
}
// Copy a column-vector matrix (GetNumRows() x 1) into column j.
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
    if (IsEmpty())
        LogicError("SetColumn: Matrix is empty.");
    if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
        LogicError("The valMat matrix has incorrect number of rows or columns.");
    auto& us = *this;
    long m = (long) GetNumRows();
#pragma omp parallel for
    // four-way unrolling
    for (long i = 0; i < (m & ~3); i += 4)
    {
        us(i, j) = valMat(i, 0);
        us(i + 1, j) = valMat(i + 1, 0);
        us(i + 2, j) = valMat(i + 2, 0);
        us(i + 3, j) = valMat(i + 3, 0);
    }
    // handle remaining stuffs
    for (long i = m & ~3; i < m; i++)
    {
        us(i, j) = valMat(i, 0);
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
    // Deep-copy another dense CPU matrix into [this]; self-assignment is a no-op.
    if (this != &deepCopyFrom)
        SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
#if 0
// NOTE(review): this entire region is compiled out via '#if 0' — dead code kept for
// reference only. These overloads would copy from GPU / sparse matrices into this
// dense CPU matrix.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
    // delegates to the sparse matrix, which writes its full column range into this dense matrix
    deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
    NOT_IMPLEMENTED;
}
#endif
// Populate the matrix from a raw array.
// pArray      - source values; may only be null when numRows * numCols == 0
// matrixFlags - matrixFlagDontOwnBuffer: adopt 'pArray' as an externally managed buffer
//               (no copy); matrixFormatRowMajor: 'pArray' is row-major and is
//               transposed into this column-major matrix.
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
    if (pArray == nullptr && numRows * numCols > 0)
        InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
    SetFormat(matrixFormatDense);
    SetComputeDeviceId(CPUDEVICE);
    // if it's externally managed, then populate the structure
    if (matrixFlags & matrixFlagDontOwnBuffer)
    {
        // free previous array allocation if any before overwriting
        delete[] Buffer();
        m_numRows = numRows;
        m_numCols = numCols;
        // last arg 'true' marks the buffer as externally owned (we must not free it)
        SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
        SetSizeAllocated(GetNumElements());
    }
    else
    {
        RequireSize(numRows, numCols);
        if (!IsEmpty())
        {
            if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
                memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
            else // need to transpose
            {
                ElemType* bufPtr = Data();
                auto& us = *this;
                // cblas_?copy with source stride numCols gathers row-major row j
                // into contiguous column j of the destination
                if (sizeof(ElemType) == sizeof(double))
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
                    }
                }
                else
                {
#pragma omp parallel for
                    foreach_column (j, us)
                    {
                        {
#pragma warning(suppress : 4244)
                            cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
                        }
                    }
                }
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
    // Set every diagonal entry of this square matrix to 'v'.
    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
    auto& us = *this;
    const long dim = (long) GetNumRows();
#pragma omp parallel for
    for (long d = 0; d < dim; d++)
        us(d, d) = v;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
    // Copy a vector (row or column) onto the diagonal of this square matrix.
    // A 1x1 input degenerates to the scalar overload.
    if (IsEmpty() || vector.IsEmpty())
        LogicError("SetDiagonalValue: Matrix is empty.");
    if (GetNumRows() != GetNumCols())
        LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
    if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
        LogicError("SetDiagonalValue: input vector must be a vector.");
    if (vector.GetNumElements() == 1) // reduce to simple form
        SetDiagonalValue(vector(0, 0));
    else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
        LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
    else
    {
        auto& us = *this;
        const long dim = (long) GetNumRows();
        if (vector.GetNumRows() == 1) // row vector
        {
#pragma omp parallel for
            for (long d = 0; d < dim; d++)
                us(d, d) = vector(0, d);
        }
        else // column vector
        {
#pragma omp parallel for
            for (long d = 0; d < dim; d++)
                us(d, d) = vector(d, 0);
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
    // Fill the matrix with i.i.d. uniform samples from [low, high).
    // Deliberately sequential: the generator state is shared, so the element
    // order determines the values and must stay deterministic for a given seed.
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::uniform_real_distribution<ElemType> r(low, high);
    ElemType* dst = Data();
    const long count = (long) GetNumElements();
    for (long k = 0; k < count; k++)
        dst[k] = r(generator);
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(RNGHandle& rngHandle, const ElemType low, const ElemType high)
{
    // Fill with uniform samples drawn from the caller-supplied RNG handle,
    // which must be a CPU-side handle.
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::uniform_real_distribution<ElemType> r(low, high);
    ElemType* dst = Data();
    const size_t count = GetNumElements();
    for (size_t k = 0; k < count; k++)
        dst[k] = r(cpuRNGHandle->Generator());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(RNGHandle& rngHandle, const ElemType mean, const ElemType stdev)
{
    // Fill with normal samples from the caller-supplied RNG handle.
    // NOTE: only AsMultipleOf(count, 2) leading elements are written — this mirrors
    // the original behavior; a trailing odd element is left untouched.
    if (IsEmpty())
        LogicError("SetGaussianRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::normal_distribution<ElemType> r(mean, stdev);
    ElemType* dst = Data();
    const auto count = AsMultipleOf(GetNumElements(), 2);
    for (size_t k = 0; k < (size_t) count; k++)
        dst[k] = r(cpuRNGHandle->Generator());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGumbelRandomValue(RNGHandle& rngHandle, const ElemType loc, const ElemType scale)
{
    // Fill with Gumbel(loc, scale) samples via inverse-transform sampling of a
    // uniform draw: x = loc - scale * log(-log(1 - u)).
    if (IsEmpty())
        LogicError("SetGumbelRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    boost::random::uniform_real_distribution<ElemType> r(0, 1);
    ElemType* dst = Data();
    const size_t count = GetNumElements();
    for (size_t k = 0; k < count; k++)
    {
        const ElemType u = r(cpuRNGHandle->Generator());
        dst[k] = loc - scale * log(-log1p(-u)); // log1p keeps precision for small u
    }
}
// Fill the matrix with i.i.d. Gaussian samples N(mean, sigma), using a local
// generator seeded from 'seed' (or the wall clock for USE_TIME_BASED_SEED).
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("SetGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    // (the foreach_coord traversal order fixes which element receives which draw)
    foreach_coord (i, j, us)
    {
        us(i, j) = r(generator);
    }
}
// Fill with Gaussian samples truncated to [mean - 2*sigma, mean + 2*sigma],
// using rejection sampling (acceptance probability ~0.9545 for a 2-sigma window).
template <class ElemType>
void CPUMatrix<ElemType>::SetTruncatedNormalRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    if (sigma <= 0)
        InvalidArgument("SetTruncatedNormalRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("SetTruncatedNormalRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long)time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    const ElemType high = mean + 2 * sigma;
    const ElemType low = mean - 2 * sigma;
    // #pragma omp parallel for is not thread safe. Also the results would not be deterministic
    foreach_coord(i, j, us)
    {
        ElemType tmp = 0;
        do
            tmp = r(generator);
        while (tmp < low || tmp > high ); // Rejection sampling is fine here because the acceptance probability is about 0.9545
        us(i, j) = tmp;
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
    // NOTE(review): despite the name, this ASSIGNS Gaussian samples (us(i,j) = ...)
    // rather than adding them, matching the original behavior; callers may depend
    // on that, so it is preserved and only documented here.
    // BUG FIX: error messages previously said "SetUniformRandomValue" (copy-paste).
    if (sigma <= 0)
        InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");
    if (IsEmpty())
        LogicError("AddGaussianRandomValue: Matrix is empty.");
    auto& us = *this;
    std::mt19937_64 generator;
    generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
    boost::random::normal_distribution<ElemType> r(mean, sigma);
    long m = (long) GetNumRows(), n = (long) GetNumCols();
    // sequential, column-major fill: the draw order must stay deterministic
    for (long j = 0; j < n; j++)
    {
        for (long i = 0; i < m; i++)
        {
            us(i, j) = r(generator);
        }
    }
}
// Dropout-style masking: each element independently becomes 0 with probability
// 'maskRate', and 'scaleValue' otherwise (the scale applied to surviving units).
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
    if (IsEmpty())
        LogicError("SetUniformRandomValue: Matrix is empty.");
    CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
    if (cpuRNGHandle == nullptr)
        LogicError("rngHandle must be a CPURNGHandle.");
    auto& us = *this;
    boost::random::uniform_real_distribution<ElemType> r(0, 1);
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
    // sequential, column-major: RNG draw order determines the mask and must be deterministic
    for (long col = 0; col < numCols; col++)
    {
        for (long row = 0; row < numRows; row++)
        {
            const ElemType draw = r(cpuRNGHandle->Generator());
            us(row, col) = draw <= maskRate ? 0 : scaleValue;
        }
    }
}
// AdaGrad update. [this] accumulates the sum of squared gradients; 'gradients'
// is rescaled in place by 1/sqrt(accumulator + floor). Returns the average of
// those reciprocal scales when 'needAveMultiplier', else 1.
// NOTE(review): the unrolled pass groups the four reciprocals into one sum
// (1/a0 + 1/a1 + 1/a2 + 1/a3) — changing the grouping would change the
// floating-point result slightly, so the loop shape is load-bearing.
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
    ElemType aveMultiplier = 0;
    // (re)initialize the accumulator to zeros if its shape does not match the gradients
    if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
        SetValue(0.0);
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
        LogicError("The matrix gradients must have the same rows and columns as this matrix.");
    ElemType *a = Data(), *d_v = gradients.Data();
    size_t n = GetNumElements();
    const ElemType floor = 1e-16f; // guards against division by zero
    ElemType a0, a1, a2, a3;
    // disable omp here because aveMultiper needs to be added atomically. however, it seems the result is incorrect even if rmp atomic and amp critical are used.
    // #pragma omp parallel for
    for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
    {
        a[i] += d_v[i] * d_v[i];
        a[i + 1] += d_v[i + 1] * d_v[i + 1];
        a[i + 2] += d_v[i + 2] * d_v[i + 2];
        a[i + 3] += d_v[i + 3] * d_v[i + 3];
        a0 = sqrt(a[i] + floor);
        a1 = sqrt(a[i + 1] + floor);
        a2 = sqrt(a[i + 2] + floor);
        a3 = sqrt(a[i + 3] + floor);
        d_v[i] /= a0;
        d_v[i + 1] /= a1;
        d_v[i + 2] /= a2;
        d_v[i + 3] /= a3;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
        }
    }
    // get the last few elements if any
    for (long i = n & ~3; i < n; i++)
    {
        a[i] += d_v[i] * d_v[i];
        a0 = sqrt(a[i] + floor);
        d_v[i] /= a0;
        if (needAveMultiplier)
        {
            aveMultiplier += 1 / a0;
        }
    }
    if (needAveMultiplier && n > 0)
        return aveMultiplier / n;
    else
        return 1;
}
// FSAdaGrad update. [this] holds the optimizer state as two column blocks:
// columns [0, C) store the smoothed squared gradients, columns [C, 2C) the
// smoothed momentum. 'functionValues' (the parameters) is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
                                    CPUMatrix<ElemType>& functionValues,
                                    ElemType learnRatePerSample,
                                    ElemType momentum,
                                    ElemType adaWeight,
                                    ElemType adaMul,
                                    bool unitGainMomentum)
{
    // unit-gain momentum scales the incoming gradient by (1 - momentum)
    auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh optimizer state starts at zero
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothMom = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    // BUG FIX: cast the unsigned bound to 'long' to avoid the signed/unsigned
    // comparison of the original 'i < n' (i is long, n is size_t).
    for (long i = 0; i < (long) n; i++)
    {
        ElemType g = grad[i];
        // exponential moving average of the squared gradient
        ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
        smoothAda[i] = adaSqr;
        if (adaSqr != 0.0f)
        {
            ElemType ada = sqrt(adaSqr);
            ElemType w = adaMul * ((ElemType) 1.0 / ada);
            if (w > 10.0f) // clip the adaptive scale
                w = 10.0f;
            g *= w;
        }
        if (momentum > 0.0f)
        {
            g = momentum * smoothMom[i] + unitGainFactor * g;
            smoothMom[i] = g;
        }
        g *= learnRatePerSample;
        val[i] -= g; // gradient-descent step
    }
}
// Adam / AdaMax update. [this] holds optimizer state: columns [0, C) store the
// smoothed second moment (or, for adamax, the running max of |g|), columns
// [C, 2C) the momentum. 'functionValues' (the parameters) is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
                               ElemType momentum, ElemType adaWeight, ElemType adaMul, ElemType epsilon, bool unitGainMomentum, bool adamax)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh optimizer state starts at zero
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothMom = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    // BUG FIX: cast the unsigned bound to 'long' to avoid signed/unsigned comparison.
    for (long i = 0; i < (long) n; i++)
    {
        ElemType g = grad[i];
        ElemType ada;
        if (!adamax)
        {
            ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
            smoothAda[i] = adaSqr;
            ada = sqrt(adaSqr);
        }
        else
        {
            // BUG FIX: use std::abs — unqualified 'abs' can bind to the integer
            // ::abs(int), silently truncating the float gradient.
            ada = smoothAda[i] = std::max(adaWeight * smoothAda[i], std::abs(g));
        }
        ElemType w = adaMul * (ElemType)( 1.0 / (ada + epsilon));
        g = momentum * smoothMom[i] + unitGainFactor * g;
        smoothMom[i] = g;
        val[i] -= g * w * learnRatePerSample;
    }
}
// RmsProp with per-weight adaptive step sizes (Rprop-style). [this] holds the
// optimizer state as three blocks of n elements each:
//   [0, n)   avars — moving average of squared gradients,
//   [n, 2n)  signs — sign of the previous gradient,
//   [2n, 3n) steps — current per-weight step size.
// 'gradients' is rescaled in place by steps / sqrt(avars + floor). Returns the
// average applied multiplier when 'needAveMultiplier', else 1.
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
                                      ElemType RMS_GAMMA,
                                      ElemType RMS_WGT_INC,
                                      ElemType RMS_WGT_MAX,
                                      ElemType RMS_WGT_DEC,
                                      ElemType RMS_WGT_MIN,
                                      const bool needAveMultiplier)
{
    const ElemType floor = 1e-6f;
    size_t n = gradients.GetNumElements();
    ElemType* curr_grad = gradients.Data();
    // first call (or shape change): allocate and initialize the state blocks
    if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3)
    {
        RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
        SetValue(0.0);
        ElemType* avars = Data(); // accumulated variances for RMS scaling
        ElemType* steps = Data() + 2 * n; // current step size
        // initialize moving average of gradient-squared
        for (long i = 0; i < n; i++)
            avars[i] = curr_grad[i] * curr_grad[i];
        // initialize starting step size
        for (long i = 0; i < n; i++)
            steps[i] = ElemType(0.02);
    }
    ElemType* avars = Data(); // accumulated variances for RMS scaling
    ElemType* signs = Data() + n; // sign of previous gradient
    ElemType* steps = Data() + 2 * n; // current step size
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
        LogicError("The matrix gradients does not have expected dimensions.");
    ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
    // NOTE(review): the block below is an older three-sign-history variant, kept
    // commented out for reference.
    // int upd[] = {
    //     2,2,0,
    //     2,2,0,
    //     1,1,1,
    //     2,2,0,
    //     1,2,1,
    //     0,2,2,
    //     1,1,1,
    //     0,2,2,
    //     0,2,2,
    // };
    //      for (long i=0; i<n; i++)
    //      {
    //          avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
    //          // grad sign base 3: 0->neg, 1->zero, 2->pos
    //          const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
    //          // signs[i] contains three consecutive grad_sign
    //          signs[i]  = 3*(int(signs[i]) % 9) + grad_sign;
    //          switch(upd[int(signs[i])])
    //          {
    //          case 0:
    //              steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
    //              break;
    //          case 2:
    //              steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
    //              break;
    //          }
    //          curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
    //      }
    ElemType aveMultiplier = 0, a;
    for (long i = 0; i < n; i++)
    {
        // update the moving average of the squared gradient
        avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
        const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
        // gradient kept its sign -> grow the step; sign flipped (or zero) -> shrink it
        if (signs[i] * grad_sign > 0)
            steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
        else
            steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
        a = steps[i] / sqrt(avars[i] + floor);
        curr_grad[i] *= a;
        signs[i] = (ElemType) grad_sign;
        if (needAveMultiplier)
            aveMultiplier += a;
    }
    if (needAveMultiplier)
        return aveMultiplier / n;
    else
        return 1;
}
// AdaDelta update (Zeiler 2012). [this] holds optimizer state: columns [0, C)
// store the smoothed squared gradients E[g^2], columns [C, 2C) the smoothed
// squared updates E[dx^2]. 'functionValues' (the parameters) is updated in place.
template <class ElemType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
    size_t numColsNeeded = 2 * gradients.GetNumCols();
    if (IsEmpty() || (GetNumCols() < numColsNeeded))
    {
        RequireSize(gradients.GetNumRows(), numColsNeeded);
        SetValue(0.0); // fresh optimizer state starts at zero
    }
    if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
        LogicError("The matrix gradients does not have expected dimensions.");
    size_t n = gradients.GetNumElements();
    ElemType* grad = gradients.Data();
    ElemType* smoothAda = Data();
    ElemType* smoothX2 = Data() + n;
    ElemType* val = functionValues.Data();
#pragma omp parallel for
    // TODO: Unroll 4-times for better performance leveraging vectorization
    // BUG FIX: cast the unsigned bound to 'long' to avoid the signed/unsigned
    // comparison of the original 'i < n' (i is long, n is size_t).
    for (long i = 0; i < (long) n; i++)
    {
        ElemType g = grad[i];
        ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
        smoothAda[i] = adaSqr;
        ElemType x2 = smoothX2[i];
        // RMS-ratio scaling: step is proportional to RMS(dx) / RMS(g)
        ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
        smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
        val[i] += learningRate * deltaX;
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
    // Reinterpret the existing (column-major) buffer with new dimensions;
    // the total element count must be preserved.
    const size_t newElementCount = numRows * numCols;
    if (newElementCount != GetNumElements())
        InvalidArgument("Reshape: Total number of elements does not match.");
    m_numRows = numRows;
    m_numCols = numCols;
}
// RequireSize() -- resize only when the current shape differs. This skips the
// VerifyResizable() check inside Resize() when the matrix is already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    const bool sameShape = (GetNumRows() == numRows) && (GetNumCols() == numCols);
    if (!sameShape)
        Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
    if (GetNumRows() == numRows && GetNumCols() == numCols)
        return;
    // throws if this is a view / externally-owned buffer that cannot be reallocated
    VerifyResizable(__func__);
    size_t numElements = numRows * numCols;
    if (numElements > GetSizeAllocated() || // grow allocation
        (!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
    {
        // reallocate buffer
        ElemType* pArray = nullptr;
        if (numElements > 0)
        {
            // allocate first so an allocation failure leaves the object untouched
            pArray = NewArray<ElemType>(numElements);
        }
        // success: update the object
        delete[] Buffer();
        SetBuffer(pArray, numElements * sizeof(ElemType));
        SetSizeAllocated(numElements);
    }
    // success
    m_sliceViewOffset = 0; // resizing invalidates any column-slice view offset
    m_numRows = numRows;
    m_numCols = numCols;
}
// Returns a newly allocated copy of the matrix contents (nullptr when empty).
// The array is allocated here but must be deleted (delete[]) by the caller.
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
    const size_t count = GetNumElements();
    if (count == 0)
        return nullptr;
    ElemType* copy = NewArray<ElemType>(count);
    memcpy(copy, Data(), sizeof(ElemType) * count);
    return copy;
}
// Copies the matrix contents into a caller-owned array, growing it when too small.
// Any (re)allocation uses NewArray (new[]); the caller must release with delete[].
// Returns the number of elements copied.
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
    size_t numElements = GetNumElements();
    if (numElements > currentArraySize)
    {
        // BUG FIX: the buffer was allocated with new[] (via NewArray) but released
        // with scalar 'delete' — a new[]/delete mismatch is undefined behavior.
        delete[] arrayCopyTo;
        arrayCopyTo = NewArray<ElemType>(numElements);
        currentArraySize = numElements;
    }
    if (numElements != 0)
    {
        memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
    }
    return numElements;
}
// Copy a sub-rectangle of the matrix into 'dst' with the given column stride.
// Not implemented for the CPU matrix; always raises.
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
    // REVIEW alexeyk: currently not used by CPU, but implement when possible.
    RuntimeError("Not implemented.");
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
    // Column-major layout: column 'col' starts at offset col * numRows.
    // For performance reason avoid extra validation in release.
    assert(col == 0 || col < GetNumCols());
    return m_numRows * col;
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
    // Element (row, col) in column-major storage: column start plus row offset.
    // For performance reason avoid extra validation in release.
    assert(row < m_numRows);
    return row + LocateColumn(col);
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
    // In-place scalar addition, delegated to AssignSumOf on ourselves.
    AssignSumOf(alpha, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
    // Returns a new matrix equal to [this] with 'alpha' added to every element.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignSumOf(alpha, *this);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // [this] = alpha + a (element-wise); resizes [this] to match 'a' unless in place.
    if (a.IsEmpty())
        LogicError("AssignSumOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 0; i < numRows; i++)
            us(i, j) = alpha + a(i, j);
    }
    return *this;
}
// [this] += a.
// Broadcasting: when a has matching dimensions, plain element-wise addition;
// a column vector is added to every column, a row vector to every row, and a
// 1x1 scalar to every element — all handled inside ScaleAndAdd.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
    ScaleAndAdd(1, a, *this);
    return *this;
}
// OUTPUT = [this] + a, with vector/scalar broadcasting as in operator+=.
// A 1x1 operand on either side is treated as a scalar added to the other matrix.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
    if (GetNumElements() == 1)
    {
        CPUMatrix<ElemType> result(a);
        result += (*this)(0, 0);
        return result;
    }
    if (a.GetNumElements() == 1)
    {
        CPUMatrix<ElemType> result(*this);
        result += a(0, 0);
        return result;
    }
    // copy then accumulate — incurs a copy but reuses the += implementation
    CPUMatrix<ElemType> result(*this);
    result += a;
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // [this] = a + b. When 'a' is a 1x1 scalar, start from 'b' and broadcast-add
    // 'a'; otherwise start from 'a' and add 'b'.
    const bool aIsScalar = (a.GetNumElements() == 1);
    SetValue(aIsScalar ? b : a);
    (*this) += (aIsScalar ? a : b);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
    // In-place scalar subtraction, delegated to AssignDifferenceOf on ourselves.
    AssignDifferenceOf(*this, alpha);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
    // Returns a new matrix equal to [this] with 'alpha' subtracted from every element.
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    result.AssignDifferenceOf(*this, alpha);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // [this] = alpha - a (element-wise); resizes [this] to match 'a' unless in place.
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 0; i < numRows; i++)
            us(i, j) = alpha - a(i, j);
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
    // [this] = a - alpha (element-wise); resizes [this] to match 'a' unless in place.
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 0; i < numRows; i++)
            us(i, j) = a(i, j) - alpha;
    }
    return *this;
}
// [this] -= a, with the same column/row-vector broadcasting as operator+=
// (ScaleAndAdd with a factor of -1).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
    ScaleAndAdd(-1, a, *this);
    return *this;
}
// OUTPUT = [this] - a, with the same column/row-vector broadcasting as operator-=.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
    // copy then subtract — incurs a copy but reuses the -= implementation
    CPUMatrix<ElemType> result(*this);
    result -= a;
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    // [this] = a - b. Safe when [this] aliases either operand.
    if (this != &a)
    {
        if (this == &b)
        {
            // BUG FIX: when [this] aliases b, copying a into [this] first would
            // overwrite b, and the subsequent subtraction would yield a - a = 0.
            // Snapshot b before the assignment.
            CPUMatrix<ElemType> bCopy(b);
            RequireSize(a.GetNumRows(), a.GetNumCols());
            SetValue(a);
            (*this) -= bCopy;
            return *this;
        }
        RequireSize(a.GetNumRows(), a.GetNumCols());
        SetValue(a);
    }
    (*this) -= b;
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
    // In-place scalar multiplication.
    Scale(alpha, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
    // Returns a new matrix equal to alpha * [this].
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    Scale(alpha, *this, result);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
    // [this] = alpha * a (scalar scaling into [this]).
    Scale(alpha, a, *this);
    return *this;
}
// [this] = op(a) * op(b), where op() is an optional transposition.
// A 1x1 operand acts as a scalar multiplier on the (possibly transposed) other operand.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
    if (a.GetNumElements() == 1)
    {
        if (transposeB)
            AssignTransposeOf(b);
        else
            SetValue(b); // BUG FIX: [this] was never assigned b when transposeB was false,
                         // so the scalar scaled whatever [this] previously contained
        (*this) *= a(0, 0);
    }
    else if (b.GetNumElements() == 1)
    {
        if (transposeA)
            AssignTransposeOf(a);
        else
            SetValue(a); // BUG FIX: same omission for the b-is-scalar branch
        (*this) *= b(0, 0);
    }
    else
        Multiply(a, transposeA, b, transposeB, *this);
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
    // Matrix product; a 1x1 operand on either side is treated as a scalar factor.
    auto& us = *this;
    CPUMatrix<ElemType> result;
    if (GetNumElements() == 1)
        result.AssignProductOf(us(0, 0), a);
    else if (a.GetNumElements() == 1)
        result.AssignProductOf(a(0, 0), us);
    else
        Multiply(us, a, result);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
    // Scalar division implemented as multiplication by the reciprocal
    // (matches operator/; note the usual floating-point caveat).
    (*this) *= 1 / alpha;
    return (*this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
    // Returns [this] scaled by the reciprocal of alpha.
    const ElemType reciprocal = 1 / alpha;
    return (*this) * reciprocal;
}
// element-wise power: [this] = [this] .^ alpha
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
    ElementWisePower(alpha, *this, *this);
    return *this;
}
// element-wise power: OUTPUT = [this] .^ alpha
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
    CPUMatrix<ElemType> result(GetNumRows(), GetNumCols());
    ElementWisePower(alpha, *this, result);
    return result;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
    // [this] = a .^ power (element-wise).
    ElementWisePower(power, a, *this);
    return *this;
}
// [this] = [this] .* a (element-wise product; '.*' cannot be overloaded in C++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    return AssignElementProductOf(*this, a);
}
// [this] = [this] ./ a (element-wise division; './' cannot be overloaded in C++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
    return AssignElementDivisionOf(*this, a);
}
// [this] = a .* b (element-wise product)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 0; i < numRows; i++)
            us(i, j) = a(i, j) * b(i, j);
    }
    return *this;
}
// [this] += a .* b (accumulate the element-wise product)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AddElementProductOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
        InvalidArgument("AddElementProductOf : The input matrix dimensions do not match [this].");
    auto& us = *this;
    const long numRows = (long) GetNumRows(), numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 0; i < numRows; i++)
            us(i, j) += a(i, j) * b(i, j);
    }
    return *this;
}
// [this] = a ./ b, with the divisor clamped away from zero:
// any b in (-smallValue, smallValue) is replaced by +/-smallValue, preserving its sign.
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementDivisionOf: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        ElemType divisor = b(i, j);
        if (divisor >= 0 && divisor < smallValue)
            divisor = smallValue;
        else if (divisor < 0 && divisor > -smallValue)
            divisor = -smallValue;
        us(i, j) = a(i, j) / divisor;
    }
    return *this;
}
// [this](i,j) *= a(i,0): scale every row of [this] by the matching entry of
// the column vector a. a must be (rows x 1) with rows == GetNumRows().
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementMultiplyWith: Matrix is empty.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
    auto& us = *this;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) *= a(row, 0);
            us(row + 1, col) *= a(row + 1, 0);
            us(row + 2, col) *= a(row + 2, 0);
            us(row + 3, col) *= a(row + 3, 0);
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) *= a(row, 0);
    }
    return *this;
}
// [this](i,j) *= a(0,j): scale every column of [this] by the matching entry of
// the row vector a. a must be (1 x cols) with cols == GetNumCols().
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementMultiplyWith: Matrix is empty.");
    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
    auto& us = *this;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        // the scale factor is constant over the column, so hoist it
        const ElemType scale = a(0, col);
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) *= scale;
            us(row + 1, col) *= scale;
            us(row + 2, col) *= scale;
            us(row + 3, col) *= scale;
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) *= scale;
    }
    return *this;
}
// [this](i,j) /= a(0,j): divide every column of [this] by the matching entry of
// the row vector a. Divisors with magnitude below EPS_IN_INVERSE are clamped to
// +/-EPS_IN_INVERSE (sign-preserving) to avoid division by near-zero.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("RowElementDivideBy: Matrix is empty.");
    if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
        InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
    auto& us = *this;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        ElemType denom = a(0, col);
        // clamp a near-zero divisor away from zero, keeping its sign
        if (denom >= 0 && denom < EPS_IN_INVERSE)
            denom = EPS_IN_INVERSE;
        else if (denom < 0 && denom > -EPS_IN_INVERSE)
            denom = (-EPS_IN_INVERSE);
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) /= denom;
            us(row + 1, col) /= denom;
            us(row + 2, col) /= denom;
            us(row + 3, col) /= denom;
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) /= denom;
    }
    return *this;
}
// [this](i,j) /= a(i,0): divide every row of [this] by the matching entry of the
// column vector a, with the same sign-preserving near-zero clamp as the other
// division routines.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty() || IsEmpty())
        LogicError("ColumnElementDivideBy: Matrix is empty.");
    if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
        InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
    auto& us = *this;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
    const ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        for (long row = 0; row < numRows; row++)
        {
            // clamp the divisor away from zero, keeping its sign
            ElemType denom = a(row, 0);
            if (denom >= 0 && denom < smallValue)
                denom = smallValue;
            else if (denom < 0 && denom > -smallValue)
                denom = (-smallValue);
            us(row, col) /= denom;
        }
    }
    return *this;
}
//[this]=1 ./ a
// In-place element-wise reciprocal; delegates to AssignElementInverseOf with
// [this] as both source and destination (which applies the EPS_IN_INVERSE clamp).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
    return AssignElementInverseOf(*this);
}
// [this](i,j) = 1 / a(i,j), with inputs whose magnitude is below EPS_IN_INVERSE
// replaced by +/-EPS_IN_INVERSE (sign-preserving) so the result stays finite.
// [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
    ElemType smallValue = EPS_IN_INVERSE;
    if (a.IsEmpty())
        LogicError("AssignElementInverseOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        // clamp near-zero inputs away from zero, keeping their sign
        if (a(i, j) < 0 && a(i, j) > -smallValue)
            us(i, j) = 1 / (-smallValue);
        else if (a(i, j) >= 0 && a(i, j) < smallValue)
            us(i, j) = 1 / smallValue;
        else
            us(i, j) = 1 / a(i, j);
    }
    return *this;
}
//[this]=sigmoid([this]) element wise
// In-place logistic sigmoid; delegates to AssignSigmoidOf with [this] as both
// source and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
    return AssignSigmoidOf(*this);
}
// [this](i,j) = sigmoid(a(i,j)) = 1/(1+exp(-x)), computed in a numerically
// stable way: for negative inputs the algebraically equivalent exp(x)/(1+exp(x))
// form is used so exp() never receives a large positive argument (no overflow).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSigmoidOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (a(i, j) >= 0)
            us(i, j) = 1 / (1 + exp(-a(i, j)));
        else
        {
            // negative input: exp(x) <= 1 here, so this branch cannot overflow
            ElemType v = exp(a(i, j));
            us(i, j) = v / (1 + v);
        }
    }
    return *this;
}
// In-place ReLU derivative (1 where positive, else 0); delegates to
// AssignLinearRectifierDerivativeOf with [this] as both source and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
    return AssignLinearRectifierDerivativeOf(*this);
}
// [this](i,j) = d/dx ReLU evaluated at a(i,j): 1 for positive inputs, 0 otherwise.
// [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) = a(row, col) > 0.0f ? 1.0f : 0.0f;
            us(row + 1, col) = a(row + 1, col) > 0.0f ? 1.0f : 0.0f;
            us(row + 2, col) = a(row + 2, col) > 0.0f ? 1.0f : 0.0f;
            us(row + 3, col) = a(row + 3, col) > 0.0f ? 1.0f : 0.0f;
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) = a(row, col) > 0.0f ? 1.0f : 0.0f;
    }
    return *this;
}
// In-place sigmoid derivative; delegates to AssignSigmoidDerivativeOf with
// [this] as both source and destination. Note: the input is expected to already
// be sigmoid output (the formula used is v*(1-v)).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
    return AssignSigmoidDerivativeOf(*this);
}
// [this](i,j) = a(i,j) * (1 - a(i,j)). This is the sigmoid derivative expressed
// in terms of the sigmoid's OUTPUT, i.e. a is expected to hold sigmoid values.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            const ElemType s0 = a(row, col);
            us(row, col) = s0 * (1 - s0);
            const ElemType s1 = a(row + 1, col);
            us(row + 1, col) = s1 * (1 - s1);
            const ElemType s2 = a(row + 2, col);
            us(row + 2, col) = s2 * (1 - s2);
            const ElemType s3 = a(row + 3, col);
            us(row + 3, col) = s3 * (1 - s3);
        }
        // leftover rows
        for (; row < numRows; row++)
        {
            const ElemType s = a(row, col);
            us(row, col) = s * (1 - s);
        }
    }
    return *this;
}
//[this]=tanh([this]) element wise
// In-place hyperbolic tangent; delegates to AssignTanhOf with [this] as both
// source and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
    return AssignTanhOf(*this);
}
// [this](i,j) = tanh(a(i,j)); [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignTanhOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) = tanh(a(row, col));
            us(row + 1, col) = tanh(a(row + 1, col));
            us(row + 2, col) = tanh(a(row + 2, col));
            us(row + 3, col) = tanh(a(row + 3, col));
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) = tanh(a(row, col));
    }
    return *this;
}
//[this]=softmax([this]) element wise
// In-place log-softmax along columns (isColWise=true) or rows; delegates to
// AssignLogSoftmaxOf with [this] as both source and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
    return AssignLogSoftmaxOf(*this, isColWise);
}
// [this] = log(softmax(a)) computed per column (isColWise) or per row, using the
// standard max-subtraction trick for numerical stability:
//   logsoftmax(x)_i = (x_i - max) - log(sum_k exp(x_k - max))
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    if (isColWise)
    {
#pragma omp parallel for
        foreach_column (j, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(0, j);
            foreach_row (i, a)
                maxV = std::max(maxV, a(i, j));
            // accumulate sum(exp(x - max)) while storing the shifted values in us
            ElemType sum = 0;
            foreach_row (i, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            // subtract log-partition to complete log-softmax
            foreach_row (i, us)
                us(i, j) -= sum;
        }
    }
    else
    {
#pragma omp parallel for
        foreach_row (i, a)
        {
            // we need to extract max before applying exp to avoid overflow
            ElemType maxV = a(i, 0);
            foreach_column (j, a)
                maxV = std::max(maxV, a(i, j));
            // accumulate sum(exp(x - max)) while storing the shifted values in us
            ElemType sum = 0;
            foreach_column (j, a)
                sum += exp(us(i, j) = a(i, j) - maxV);
            sum = log(sum);
            // subtract log-partition to complete log-softmax
            foreach_column (j, us)
                us(i, j) -= sum;
        }
    }
    return *this;
}
//[this]=hardmax([this])
//the max element is 1 else is 0
// In-place hardmax; delegates to AssignHardmaxOf with [this] as both source
// and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
    return AssignHardmaxOf(*this, isColWise);
}
// Per column (isColWise) or per row: set the FIRST occurrence of the maximum
// element to 1 and every other element to 0. Ties resolve to the lowest index
// because the scan only updates on a strictly greater value.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("AssignHardmaxOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    if (isColWise)
    {
#pragma omp parallel for
        foreach_column (j, a)
        {
            // we need to extract max
            ElemType maxV = a(0, j);
            long maxI = 0;
            foreach_row (i, a)
            {
                if (maxV < a(i, j))
                {
                    maxV = a(i, j);
                    maxI = i;
                }
            }
            // one-hot encode the argmax position
            foreach_row (i, us)
                us(i, j) = (i == maxI) ? 1.0f : 0.0f;
        }
    }
    else
    {
#pragma omp parallel for
        foreach_row (i, a)
        {
            // we need to extract max
            ElemType maxV = a(i, 0);
            long maxJ = 0;
            foreach_column (j, a)
            {
                if (maxV < a(i, j))
                {
                    maxV = a(i, j);
                    maxJ = j;
                }
            }
            // one-hot encode the argmax position
            foreach_column (j, us)
                us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
        }
    }
    return *this;
}
//[this]=sqrt([this]) element wise
// In-place square root; delegates to AssignSqrtOf with [this] as both source
// and destination (negative inputs are clamped to 0 there).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
    return AssignSqrtOf(*this);
}
//to prevent negative values caused by floating operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
// [this](i,j) = sqrt(max(0, a(i,j))); [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSqrtOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop; clamp negatives to zero before sqrt
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) = sqrt(max((ElemType) 0, a(row, col)));
            us(row + 1, col) = sqrt(max((ElemType) 0, a(row + 1, col)));
            us(row + 2, col) = sqrt(max((ElemType) 0, a(row + 2, col)));
            us(row + 3, col) = sqrt(max((ElemType) 0, a(row + 3, col)));
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) = sqrt(max((ElemType) 0, a(row, col)));
    }
    return *this;
}
//[this]=exp([this]) element wise
// In-place exponential; delegates to AssignExpOf with [this] as both source
// and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
    return AssignExpOf(*this);
}
// [this](i,j) = exp(a(i,j)); [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignExpOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) = exp(a(row, col));
            us(row + 1, col) = exp(a(row + 1, col));
            us(row + 2, col) = exp(a(row + 2, col));
            us(row + 3, col) = exp(a(row + 3, col));
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) = exp(a(row, col));
    }
    return *this;
}
//[this]=abs([this]) element wise (comment previously said "exp" — copy-paste leftover)
// In-place absolute value; delegates to AssignAbsOf with [this] as both source
// and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
    return AssignAbsOf(*this);
}
// [this](i,j) = |a(i,j)|; [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignAbsOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            us(row, col) = abs(a(row, col));
            us(row + 1, col) = abs(a(row + 1, col));
            us(row + 2, col) = abs(a(row + 2, col));
            us(row + 3, col) = abs(a(row + 3, col));
        }
        // leftover rows
        for (; row < numRows; row++)
            us(row, col) = abs(a(row, col));
    }
    return *this;
}
//[this]=log([this]) element wise
// In-place natural log; delegates to AssignLogOf with [this] as both source
// and destination (inputs below EPS_IN_LOG are mapped to LOG_OF_EPS_IN_LOG there).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
    return AssignLogOf(*this);
}
//[this]=log10([this]) element wise (comment previously said "log" — copy-paste leftover)
// In-place base-10 log; delegates to AssignLog10Of with [this] as both source
// and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
    return AssignLog10Of(*this);
}
// [this](i,j) = log(a(i,j)). Inputs below EPS_IN_LOG (including zero and
// negatives) are mapped to the constant LOG_OF_EPS_IN_LOG instead of producing
// -inf/NaN. [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLogOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        if (v < EPS_IN_LOG)
        {
            // floor tiny/non-positive inputs at a precomputed constant
            us(i, j) = LOG_OF_EPS_IN_LOG;
        }
        else
            us(i, j) = log(v);
    }
    return *this;
}
// [this](i,j) = log10(a(i,j)). Non-positive inputs are an error; positive inputs
// below EPS_IN_LOG are mapped to the constant LOG10_OF_EPS_IN_LOG. [this] is
// resized to a's shape unless it aliases a.
// Fix: error messages previously said "AssignLogOf" (copy-paste from the
// natural-log variant), which made failures here misleading to diagnose.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignLog10Of: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        if (v <= 0)
            LogicError("AssignLog10Of: Log10 can only be applied to numbers larger than 0.");
        else if (v < EPS_IN_LOG)
        {
            // floor tiny positive inputs at a precomputed constant
            us(i, j) = LOG10_OF_EPS_IN_LOG;
        }
        else
            us(i, j) = log10(v);
    }
    return *this;
}
//[this]=cos([this]) element wise
// In-place cosine; delegates to AssignCosineOf with [this] as both source
// and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
    return AssignCosineOf(*this);
}
// [this](i,j) = cos(a(i,j)); [this] is resized to a's shape unless it aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignCosineOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        us(i, j) = cos(v);
    }
    return *this;
}
//[this]=-sin([this]) element wise
// In-place negative sine (the derivative of cos); delegates to
// AssignNegativeSineOf with [this] as both source and destination.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
    return AssignNegativeSineOf(*this);
}
// [this](i,j) = -sin(a(i,j)) (the derivative of cosine); [this] is resized to
// a's shape unless it aliases a.
// Fix: the empty-matrix error message previously said "AssignCosineOf"
// (copy-paste from the cosine variant), pointing diagnostics at the wrong function.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignNegativeSineOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        const ElemType v = a(i, j);
        us(i, j) = -sin(v);
    }
    return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
// In-place lower clamp: every element below `threshold` is raised to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncateBottom: Matrix is empty.");
    auto& us = *this;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            if (us(row, col) < threshold)
                us(row, col) = threshold;
            if (us(row + 1, col) < threshold)
                us(row + 1, col) = threshold;
            if (us(row + 2, col) < threshold)
                us(row + 2, col) = threshold;
            if (us(row + 3, col) < threshold)
                us(row + 3, col) = threshold;
        }
        // leftover rows
        for (; row < numRows; row++)
        {
            if (us(row, col) < threshold)
                us(row, col) = threshold;
        }
    }
    return *this;
}
// In-place symmetric clamp: every element is limited to [-|threshold|, +|threshold|].
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncate: Matrix is empty.");
    auto& us = *this;
    // derive a symmetric bound from the (possibly negative) threshold argument
    const ElemType upperBound = abs(threshold);
    const ElemType lowerBound = -upperBound;
    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols();
#pragma omp parallel for
    for (long col = 0; col < numCols; col++)
    {
        long row = 0;
        // four-way unrolled main loop
        for (; row + 4 <= numRows; row += 4)
        {
            if (us(row, col) > upperBound)
                us(row, col) = upperBound;
            else if (us(row, col) < lowerBound)
                us(row, col) = lowerBound;
            if (us(row + 1, col) > upperBound)
                us(row + 1, col) = upperBound;
            else if (us(row + 1, col) < lowerBound)
                us(row + 1, col) = lowerBound;
            if (us(row + 2, col) > upperBound)
                us(row + 2, col) = upperBound;
            else if (us(row + 2, col) < lowerBound)
                us(row + 2, col) = lowerBound;
            if (us(row + 3, col) > upperBound)
                us(row + 3, col) = upperBound;
            else if (us(row + 3, col) < lowerBound)
                us(row + 3, col) = lowerBound;
        }
        // leftover rows
        for (; row < numRows; row++)
        {
            if (us(row, col) > upperBound)
                us(row, col) = upperBound;
            else if (us(row, col) < lowerBound)
                us(row, col) = lowerBound;
        }
    }
    return *this;
}
//x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
// In-place soft-thresholding (shrinkage) operator, commonly used for L1/proximal
// updates: shrink each element toward zero by `threshold`, zeroing it if its
// magnitude is at most `threshold`. Operates on the flat element buffer.
// Fix: the empty-matrix error message previously said "InplaceTruncate"
// (copy-paste from that function), pointing diagnostics at the wrong routine.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceSoftThreshold: Matrix is empty.");
    long m = (long) GetNumElements();
    ElemType* bufPtr = Data();
#pragma omp parallel for
    for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
    {
        if (bufPtr[i] > threshold)
            bufPtr[i] -= threshold;
        else if (bufPtr[i] < -threshold)
            bufPtr[i] += threshold;
        else
            bufPtr[i] = 0;
        if (bufPtr[i + 1] > threshold)
            bufPtr[i + 1] -= threshold;
        else if (bufPtr[i + 1] < -threshold)
            bufPtr[i + 1] += threshold;
        else
            bufPtr[i + 1] = 0;
        if (bufPtr[i + 2] > threshold)
            bufPtr[i + 2] -= threshold;
        else if (bufPtr[i + 2] < -threshold)
            bufPtr[i + 2] += threshold;
        else
            bufPtr[i + 2] = 0;
        if (bufPtr[i + 3] > threshold)
            bufPtr[i + 3] -= threshold;
        else if (bufPtr[i + 3] < -threshold)
            bufPtr[i + 3] += threshold;
        else
            bufPtr[i + 3] = 0;
    }
    // handle remaining elements (count % 4)
    for (long i = m & ~3; i < m; i++)
    {
        if (bufPtr[i] > threshold)
            bufPtr[i] -= threshold;
        else if (bufPtr[i] < -threshold)
            bufPtr[i] += threshold;
        else
            bufPtr[i] = 0;
    }
    return *this;
}
//Threshold truncating: this[i] = max( a[i], threshold )
// Lower-clamp copy of a into [this]; [this] is resized to a's shape unless it
// aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
    if (a.IsEmpty())
        LogicError("AssignTruncateBottomOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        if (a(i, j) < threshold)
            us(i, j) = threshold;
        else
            us(i, j) = a(i, j);
    }
    return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
// In-place upper clamp: every element above `threshold` is lowered to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("InplaceTruncateTop: Matrix is empty.");
    auto& us = *this;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (us(i, j) > threshold)
            us(i, j) = threshold;
    }
    return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
// Upper-clamp copy of a into [this]; [this] is resized to a's shape unless it
// aliases a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
    if (a.IsEmpty())
        LogicError("AssignTruncateTopOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        if (a(i, j) > threshold)
            us(i, j) = threshold;
        else
            us(i, j) = a(i, j);
    }
    return *this;
}
//Threshold truncating: this[i] = 0 if abs(this[i]) < threshold.
// In-place magnitude-based zeroing (e.g. for sparsification).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
    if (IsEmpty())
        LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
    auto& us = *this;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        if (abs(us(i, j)) < threshold)
            us(i, j) = 0;
    }
    return *this;
}
//sum of all abs(elements)
// L1 sum over the whole buffer via BLAS asum. The sizeof-based branch picks the
// double or float BLAS entry point; the reinterpret_casts are no-ops on the
// branch that is actually taken for a given ElemType.
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
    if (IsEmpty())
        LogicError("SumOfAbsElements: Matrix is empty.");
    if (sizeof(ElemType) == sizeof(double))
    {
        return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
    }
    else
    {
#pragma warning(suppress : 4244)
        return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
    }
}
//sum of all elements
// Scalar sum over the flat element buffer, parallelized with an OpenMP
// reduction; note: OpenMP requires signed loop indices, hence long not size_t.
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
    if (IsEmpty())
        LogicError("SumOfElements: Matrix is empty.");
    ElemType total = 0;
    const long count = (long) GetNumElements();
    const ElemType* p = Data();
// four elements per iteration; partial sums combined by the reduction clause
#pragma omp parallel for reduction(+ : total)
    for (long idx = 0; idx < (count & ~3); idx += 4)
        total += p[idx] + p[idx + 1] + p[idx + 2] + p[idx + 3];
    // leftover elements (count % 4), done serially
    for (long idx = count & ~3; idx < count; idx++)
        total += p[idx];
    return total;
}
// [this] becomes a 1x1 matrix holding the scalar sum of all elements of a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignSumOfElements: Matrix a is empty.");
    auto& us = *this;
    us.RequireSize(1, 1);
    us(0, 0) = a.SumOfElements();
    return *this;
}
// One-hot-expand the index values in a along `axis` of `shape`: the output has
// num_class times as many rows as a; each input value v in [0, num_class) sets
// exactly one 1 in its expanded block, out-of-range values leave the block all
// zeros. item_size is the product of the shape dimensions below `axis`.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
    if (a.IsEmpty())
        LogicError("AssignOneHot: Matrix a is empty.");
    if (axis >= shape.size())
        LogicError("AssignOneHot: axis is not correct");
    // stride of one "item" below the one-hot axis
    size_t item_size = 1;
    for (size_t i = 0; i < shape.size() && i < axis; i++)
        item_size *= shape[i];
    size_t num_class = shape[axis];
    auto& us = *this;
    auto nCols = a.GetNumCols();
    auto nRows = num_class * a.GetNumRows();
    us.RequireSize(nRows, nCols);
    ElemType* bufPtr = Data();
    ElemType* aBufPtr = a.Data();
    // start from all zeros; only the hit positions are written below
    memset(bufPtr, 0, sizeof(ElemType) * nRows *nCols);
// NOTE(review): signed `long i` vs unsigned GetNumElements() comparison — fine
// while the element count fits in a long, but worth confirming.
#pragma omp parallel for
    for (long i = 0; i < a.GetNumElements(); i++)
    {
        // skip out-of-range class indices (leaves that block all zeros)
        if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class)
        {
            size_t block_id = i / item_size;
            size_t item_id = i % item_size;
            bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1;
        }
    }
    return *this;
}
// Gather: for each index value in `indices`, copy a row-block of `row_elements`
// contiguous elements from `target` into [this]. Output shape is
// (indices.rows * row_elements) x indices.cols.
// NOTE(review): index values are trusted — no bounds check against target's size.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GatherFromTarget(const CPUMatrix<ElemType>& indices, const CPUMatrix<ElemType>& target, size_t row_elements)
{
    if (indices.IsEmpty() || target.IsEmpty())
        LogicError("GatherFromTarget: input matrix is empty.");
    if (row_elements == 0)
        LogicError("GatherFromTarget: target matrix at least need 1 dim.");
    auto nCols = indices.GetNumCols();
    auto nRows = indices.GetNumRows() * row_elements;
    this->RequireSize(nRows, nCols);
    ElemType* indicesBufPtr = indices.Data();
    ElemType* targetBufPtr = target.Data();
    ElemType* buffer = Data();
#pragma omp parallel for
    for (int i = 0; i < indices.GetNumElements(); i++)
    {
        // block copy: destination block i comes from the source block selected by indices[i]
        memcpy(buffer + i * row_elements, targetBufPtr + ((size_t)indicesBufPtr[i] * row_elements), sizeof(ElemType) * row_elements);
    }
    return *this;
}
// Scatter-add: accumulate row-blocks of `values` into [this] at the positions
// given by `indices`; the heavy lifting (and any duplicate-index accumulation
// semantics) lives in the ScatterValues helper.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ScatterToIndices(const CPUMatrix<ElemType>& values, const CPUMatrix<ElemType>& indices, size_t row_elements)
{
    if (indices.IsEmpty() || values.IsEmpty())
        LogicError("ScatterToIndices: input matrix is empty.");
    ElemType* indicesBufPtr = indices.Data();
    ElemType* valueBufPtr = values.Data();
    ElemType* buffer = Data();
    ScatterValues(indicesBufPtr, valueBufPtr, buffer, (ElemType)1, indices.GetNumElements(), row_elements, this->GetNumCols());
    return *this;
}
// Element-wise approximate equality against a, within `threshold`; thin wrapper
// over the static AreEqual.
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
    return AreEqual(*this, a, threshold);
}
// c = per-column sums of a (isColWise, c is 1 x n) or per-row sums (c is m x 1).
// Fix: removed the `#pragma omp atomic` that wrapped `v += a(i, j)`. `v` is
// declared inside the body of the omp-parallel outer loop, so each thread has
// its own private copy and no other thread ever touches it; the atomic provided
// no synchronization benefit while forcing an atomic RMW on every element.
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
    if (a.IsEmpty())
        LogicError("VectorSum: Input matrix a is empty.");
    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
#pragma omp parallel for
        foreach_column (j, a)
        {
            // v is local to this (parallel) iteration — no sharing, no atomics needed
            ElemType v = 0;
            foreach_row (i, a)
            {
                v += a(i, j);
            }
            c(0, j) = v;
        }
    }
    else
    {
        c.RequireSize(m, 1);
#pragma omp parallel for
        foreach_row (i, a)
        {
            ElemType v = 0;
            foreach_column (j, a)
            {
                v += a(i, j);
            }
            c(i, 0) = v;
        }
    }
}
// c = per-column L1 norms of [this] (isColWise, c is 1 x n) or per-row L1 norms
// (c is m x 1).
// Fix: removed the `#pragma omp atomic` around `v += abs(us(i, j))`. `v` is
// declared inside the body of the omp-parallel outer loop, so it is private to
// each thread; the atomic added per-element synchronization cost for nothing.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    if (IsEmpty())
        LogicError("VectorNorm1: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
#pragma omp parallel for
        foreach_column (j, us)
        {
            // v is local to this (parallel) iteration — no sharing, no atomics needed
            ElemType v = 0;
            foreach_row (i, us)
            {
                v += abs(us(i, j));
            }
            c(0, j) = v;
        }
    }
    else
    {
        c.RequireSize(m, 1);
#pragma omp parallel for
        foreach_row (i, us)
        {
            ElemType v = 0;
            foreach_column (j, us)
            {
                v += abs(us(i, j));
            }
            c(i, 0) = v;
        }
    }
}
// [this] receives the per-column (or per-row) L1 norms of a; thin wrapper over
// VectorNorm1 with the roles of source and destination swapped.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
    a.VectorNorm1(*this, isColWise);
    return *this;
}
// c = per-column L2 norms of [this] (isColWise, c is 1 x n) or per-row L2 norms
// (c is m x 1), via BLAS nrm2. Column case walks each contiguous column
// (stride 1 from LocateColumn(j)); row case reads a row as a strided vector
// (stride m from offset i) — [this] is column-major.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    if (IsEmpty())
        LogicError("VectorNorm2: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    ElemType* bufPtr = us.Data();
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        // sizeof-based dispatch to the double/float BLAS entry point; the
        // reinterpret_cast is a no-op on the branch actually taken
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
                c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
            }
        }
    }
    else
    {
        c.RequireSize(m, 1);
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                // row i as a strided vector: n elements, stride m
                c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
            }
        }
    }
}
// [this] receives the per-column (or per-row) L2 norms of a; thin wrapper over
// VectorNorm2 with the roles of source and destination swapped.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
    a.VectorNorm2(*this, isColWise);
    return *this;
}
// c = per-column max-abs (infinity norm) of [this] (isColWise, c is 1 x n) or
// per-row max-abs (c is m x 1). Runs serially; the commented-out omp pragmas
// were presumably disabled deliberately — re-enabling them would be safe here
// since v is loop-local, but confirm before changing.
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
    if (IsEmpty())
        LogicError("VectorNormInf: Matrix is empty.");
    auto& us = *this;
    const int m = (int) us.GetNumRows();
    const int n = (int) us.GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        c.RequireSize(1, n);
        // #pragma omp parallel for
        foreach_column (j, us)
        {
            ElemType v = 0;
            foreach_row (i, us)
            {
                v = std::max(v, abs(us(i, j)));
            }
            c(0, j) = v;
        }
    }
    else
    {
        c.RequireSize(m, 1);
        // #pragma omp parallel for
        foreach_row (i, us)
        {
            ElemType v = 0;
            foreach_column (j, us)
            {
                v = std::max(v, abs(us(i, j)));
            }
            c(i, 0) = v;
        }
    }
}
// [this] receives the per-column (or per-row) infinity norms of a; thin wrapper
// over VectorNormInf with the roles of source and destination swapped.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
    a.VectorNormInf(*this, isColWise);
    return *this;
}
// [this] receives the per-column (or per-row) inner products of a and b; thin
// wrapper over the static InnerProduct.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
    InnerProduct(a, b, *this, isColWise);
    return *this;
}
//column-wise crossproduct
// Khatri-Rao (column-wise Kronecker) product: for each column k, the output
// column is the outer product of a(:,k) and b(:,k) flattened so that
// out(jb * rowsA + ia, k) = a(ia, k) * b(jb, k). Output is (rowsA*rowsB) x cols.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
    long cols = (long) a.GetNumCols();
    if (cols != b.GetNumCols())
        InvalidArgument("a.GetNumCols() != b.GetNumCols()");
    const long rowsA = (long) a.GetNumRows();
    const long rowsB = (long) b.GetNumRows();
    RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
    for (long k = 0; k < cols; k++)
    {
        // destination row index is jb * rowsA + ia (a varies fastest)
        for (long jb = 0; jb < rowsB; jb++)
        {
            for (long ia = 0; ia < rowsA; ia++)
            {
                (*this)(jb * rowsA + ia, k) = a(ia, k) * b(jb, k);
            }
        }
    }
    return *this;
}
//column-wise reshaped product. Used to compute KhatriRaoProduct Gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames).
// the output is a (K1, frames) matrix
// if each column of a is tranposed, each (K1, K2)^T times each column of b(K1, frames) and output is (K2, frames)
// Accumulates (+=) into [this]; [this] must already be (rowsA/rowsB) x cols.
// Each column of a is walked linearly via the running index k, which matches
// column-major order of the implicit (nrows x ncols) reshape.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AddColumnReshapeProductOf: Matrix is empty.");
    long cols = (long) a.GetNumCols();
    if (cols != b.GetNumCols())
        InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
    long rowsA = (long) a.GetNumRows();
    long rowsB = (long) b.GetNumRows();
    if (rowsA % rowsB != 0)
        InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
    long rowsC = rowsA / rowsB;
    if (rowsC != GetNumRows() || cols != GetNumCols())
        InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
    auto& us = *this;
    if (transposeAColumn)
    {
        // find nrows and ncols of tbe reshaped a
        long nrows = rowsB;
        long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
        foreach_column (t, a)
        {
            size_t k = 0;
            for (size_t j = 0; j < ncols; j++) // row and col is transposed
            {
                // dot product of reshaped-a's column j with b's column t
                ElemType v = 0;
                for (size_t i = 0; i < nrows; i++)
                {
                    v += a(k, t) * b(i, t);
                    k++;
                }
                us(j, t) += v;
            }
        }
    }
    else
    {
        size_t ncols = rowsB;
        size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
        foreach_column (t, a)
        {
            size_t k = 0;
            for (size_t j = 0; j < ncols; j++)
            {
                // rank-1 accumulation: column j of reshaped a scaled by b(j, t)
                for (size_t i = 0; i < nrows; i++)
                {
                    us(i, t) += a(k, t) * b(j, t);
                    k++;
                }
            }
        }
    }
    return *this;
}
// [this] += alpha * a; thin wrapper over the static ScaleAndAdd.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
    ScaleAndAdd(alpha, a, *this);
    return *this;
}
// Frobenius norm: sqrt of the sum of squared elements over the flat buffer,
// parallelized with an OpenMP reduction.
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
    if (IsEmpty())
        LogicError("FrobeniusNorm: Matrix is empty.");
    ElemType sumSq = 0;
    const long count = (long) GetNumElements();
    const ElemType* p = Data();
// four elements per iteration; partial sums combined by the reduction clause
#pragma omp parallel for reduction(+ : sumSq)
    for (long idx = 0; idx < (count & ~3); idx += 4)
        sumSq += p[idx] * p[idx] + p[idx + 1] * p[idx + 1] + p[idx + 2] * p[idx + 2] + p[idx + 3] * p[idx + 3];
    // leftover elements (count % 4), done serially
    for (long idx = count & ~3; idx < count; idx++)
        sumSq += p[idx] * p[idx];
    return sqrt(sumSq);
}
// [this] becomes a 1x1 matrix holding the Frobenius norm of a.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
    auto& us = *this;
    us.RequireSize(1, 1);
    us(0, 0) = a.FrobeniusNorm();
    return us;
}
// Max absolute element over the whole matrix (entry-wise infinity norm).
// NOTE(review): the omp critical section serializes every element update, so the
// parallel for likely yields little speedup here; kept as-is since it is correct.
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
    if (IsEmpty())
        LogicError("MatrixNormInf: Matrix is empty.");
    auto& us = *this;
    ElemType v = 0;
#pragma omp parallel for
    foreach_coord (i, j, us)
    {
        // v is shared across threads, so the max-update must be serialized
#pragma omp critical
        {
            v = std::max(v, abs(us(i, j)));
        }
    }
    return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
    // "L0 norm": the number of non-zero entries, returned as ElemType.
    if (IsEmpty())
        LogicError("MatrixNorm0: Matrix is empty.");
    auto& us = *this;
    ElemType v = 0;
    // Counting is a plain sum, so use an OpenMP sum reduction (same style as
    // MatrixNorm1) instead of the previous critical section around every
    // increment, which serialized the parallel loop.
#pragma omp parallel for reduction(+ : v)
    foreach_coord (i, j, us)
    {
        if (us(i, j) != 0)
        {
            v += 1;
        }
    }
    return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
    // Entrywise L1 norm: sum of |element| over all entries.
    if (IsEmpty())
        LogicError("MatrixNorm1: Matrix is empty.");
    auto& us = *this;
    ElemType total = 0;
#pragma omp parallel for reduction(+ : total)
    foreach_coord (i, j, us)
    {
        total += abs(us(i, j));
    }
    return total;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
    // Sets us(i,j) = sign(a(i,j)) in {-1, 0, +1}; NaN entries are copied through unchanged.
    if (a.IsEmpty())
        LogicError("AssignSignOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_column (j, us)
    {
        foreach_row (i, us)
        {
            const ElemType v = a(i, j);
            if (std::isnan(v))
                us(i, j) = v; // propagate NaN as-is
            else if (v > 0)
                us(i, j) = (ElemType) 1;
            else if (v < 0)
                us(i, j) = (ElemType)(-1);
            else
                us(i, j) = (ElemType) 0;
        }
    }
    return us;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
    // Adds sign(a(i,j)) in {-1, 0, +1} into us(i,j); a NaN in a OVERWRITES the
    // target cell (assignment, not '+='), matching the original behavior.
    if (a.IsEmpty())
        LogicError("AddSignOf: Matrix a is empty.");
    auto& us = *this;
    if (this != &a)
        RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
    foreach_column (j, us)
    {
        foreach_row (i, us)
        {
            const ElemType v = a(i, j);
            if (std::isnan(v))
            {
                us(i, j) = v; // NaN replaces the cell rather than being added
            }
            else
            {
                ElemType sign;
                if (v > 0)
                    sign = (ElemType) 1;
                else if (v < 0)
                    sign = (ElemType)(-1);
                else
                    sign = (ElemType) 0;
                us(i, j) += sign; // adding 0 kept on purpose (preserves IEEE signed-zero result)
            }
        }
    }
    return us;
}
// I decided to use CPUMatrix<ElemType>& maxIndexes instead of an integer vector because the result may be used to do additional calculation
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
    // Finds the topK largest values per column (isColWise == true) or the single
    // largest value per row (isColWise == false, topK must be 1).
    // On return: maxValues/maxIndexes are topK x n (col-wise) or m x 1 (row-wise);
    // indices are stored as ElemType.
    if (IsEmpty())
        LogicError("VectorMax: Matrix is empty.");
    auto& us = *this;
    const int m = (int) GetNumRows();
    const int n = (int) GetNumCols();
    if (topK > m)
        InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        maxValues.RequireSize(topK, n);
        maxIndexes.RequireSize(topK, n);
        if (topK == 1)
        {
            // Fast path: single linear scan per column, parallelized over columns.
#pragma omp parallel for
            for (int j = 0; j < n; j++)
            {
                ElemType v = us(0, j);
                size_t index = 0;
                foreach_row (i, us)
                {
                    // strict '<' keeps the first occurrence on ties
                    if (v < us(i, j))
                    {
                        index = i;
                        v = us(i, j);
                    }
                }
                maxValues(0, j) = v;
                maxIndexes(0, j) = (ElemType) index;
            }
        }
        else
        {
            // General top-K path (sequential over columns): select via nth_element
            // on an index array that is reused (and permuted) across columns --
            // any permutation of 0..m-1 is a valid starting point.
            std::vector<int> indices(m);
            int i = 0;
            std::generate(indices.begin(), indices.end(), [&i]
            {
                return i++;
            });
            const ElemType* curVal = Data();
            ElemType* curIdx = maxIndexes.Data();
            ElemType* curMax = maxValues.Data();
            for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
            {
                // nth_element partitions so the topK largest values come first,
                // but note that they are NOT sorted among themselves.
                std::nth_element(indices.begin(), indices.begin() + topK, indices.end(),
                                 [curVal](const int& a, const int& b)
                {
                    return curVal[a] > curVal[b];
                });
                // REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
                // std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
                for (int i2 = 0; i2 < topK; i2++)
                {
                    curIdx[i2] = static_cast<ElemType>(indices[i2]);
                    curMax[i2] = curVal[indices[i2]];
                }
            }
        }
    }
    else
    {
        if (topK > 1)
            RuntimeError("Row-wise TopK max is not supported.");
        maxValues.RequireSize(m, 1);
        maxIndexes.RequireSize(m, 1);
#pragma omp parallel for
        for (int i = 0; i < m; i++)
        {
            ElemType v = us(i, 0);
            size_t index = 0;
            foreach_column (j, us)
            {
                if (v < us(i, j))
                {
                    index = j;
                    v = us(i, j);
                }
            }
            maxValues(i, 0) = v;
            maxIndexes(i, 0) = (ElemType) index;
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
    // Per-column (isColWise) or per-row minimum: writes the winning value and
    // its index (as ElemType). The strict '>' keeps the earliest index on ties.
    if (IsEmpty())
        LogicError("VectorMin: Matrix is empty.");
    auto& us = *this;
    const int m = (int) GetNumRows();
    const int n = (int) GetNumCols();
    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
    if (isColWise) // col-wise
    {
        minValues.RequireSize(1, n);
        minIndexes.RequireSize(1, n);
#pragma omp parallel for
        for (int j = 0; j < n; j++)
        {
            ElemType best = us(0, j);
            size_t bestIdx = 0;
            foreach_row (i, us)
            {
                const ElemType cur = us(i, j);
                if (best > cur)
                {
                    bestIdx = i;
                    best = cur;
                }
            }
            minValues(0, j) = best;
            minIndexes(0, j) = (ElemType) bestIdx;
        }
    }
    else
    {
        minValues.RequireSize(m, 1);
        minIndexes.RequireSize(m, 1);
#pragma omp parallel for
        for (int i = 0; i < m; i++)
        {
            ElemType best = us(i, 0);
            size_t bestIdx = 0;
            foreach_column (j, us)
            {
                const ElemType cur = us(i, j);
                if (best > cur)
                {
                    bestIdx = j;
                    best = cur;
                }
            }
            minValues(i, 0) = best;
            minIndexes(i, 0) = (ElemType) bestIdx;
        }
    }
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
    // Counts differences between a and b and stores the count in this 1x1 matrix.
    // searchInCol == false: element-wise comparison (shapes must match).
    // searchInCol == true:  for each column, counts 1 if a(0, col) occurs nowhere
    //                       in the corresponding column of b.
    if (a.GetNumCols() != b.GetNumCols())
        throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns.");
    if (!searchInCol && a.GetNumRows() != b.GetNumRows())
        throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows.");
    ElemType diffCount = 0;
    if (!searchInCol)
    {
        foreach_coord (i, j, a)
        {
            diffCount += (a(i, j) != b(i, j));
        }
    }
    else
    {
        const size_t rowsOfB = b.GetNumRows();
        const ElemType* col = b.Data(); // walk b column by column
        for (size_t icol = 0; icol < a.GetNumCols(); icol++, col += rowsOfB)
        {
            // a(0, icol) absent from this column of b -> counts as a difference
            if (std::find(col, col + rowsOfB, a(0, icol)) == col + rowsOfB)
                diffCount++;
        }
    }
    RequireSize(1, 1); // result is a single scalar
    (*this)(0, 0) = diffCount;
    return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
// Describes which indices of one dimension to print:
// the union of [begin, skipBegin) and [skipEnd, end).
struct PrintRange
{
    // print from begin to skipBegin, then from skipEnd to end
    // skipBegin = end if no split
    size_t begin;
    size_t skipBegin;
    size_t skipEnd;
    size_t end;
    bool IsEmpty() const { return end <= begin; }
    // examples:
    //  * 3..10: print indices 3..10 inclusive (clamped to total)
    //  * -3..-3: include end-3..end and 0..3 ("corner" printing)
    PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
    {
        if (first >= 0 && last >= 0)
        {
            // contiguous range [first, last], clamped to the dimension size
            begin = (size_t)first;
            end = (size_t)last + 1;
            if (end > total) // allow INT_MAX, meaning to end
                end = total;
            skipBegin = end; // skipBegin == end means: no middle gap
            skipEnd = end;
        }
        else if (first < 0 && last < 0)
        {
            // corners: print the first (-last) and the last (-first) indices,
            // skipping the middle range [skipBegin, skipEnd).
            // NOTE(review): total + first wraps if -first > total -- presumably
            // callers pass small negatives; verify at call sites.
            begin = 0;
            skipBegin = (size_t)(-last);
            skipEnd = (size_t)(total + first);
            if (skipEnd <= skipBegin) // the two corners overlap -> print everything
                skipBegin = skipEnd = total;
            end = total;
        }
        else // if other combinations are ever of interest then implement them here
            LogicError("Print: Bounds must be either both positive or both negative.");
    }
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
    // Pretty-prints (a sub-range of) the matrix to stderr, eliding skipped
    // regions with "...". Row/col bounds are inclusive; see PrintRange above for
    // the negative-bound "corner" convention.
    fprintf(stderr, "\n###### ");
    if (matrixName != nullptr)
        fprintf(stderr, "%s ", matrixName);
    fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
    // show the requested bounds only when they do not cover the whole matrix
    if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
        fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
    fprintf(stderr, " ######\n\n");
    if (IsEmpty())
    {
        fprintf(stderr, "(empty)\n");
        return;
    }
    PrintRange rowRange(rowFirst, rowLast, GetNumRows());
    PrintRange colRange(colFirst, colLast, GetNumCols());
    if (rowRange.IsEmpty() || colRange.IsEmpty())
    {
        fprintf(stderr, "(empty)\n");
        return;
    }
    const auto& us = *this;
    if (rowRange.begin > 0) // rows were cut off at the top
        fprintf(stderr, "...\n");
    for (size_t i = rowRange.begin; i < rowRange.end; i++)
    {
        if (i == rowRange.skipBegin) // insert ... between the two blocks if any
        {
            fprintf(stderr, "...\n");
            i = rowRange.skipEnd; // resume at the second block; row skipEnd itself IS printed
        }
        if (colRange.begin > 0) // ... at line start
            fprintf(stderr, "...\t");
        for (size_t j = colRange.begin; j < colRange.end; j++)
        {
            if (j == colRange.skipBegin) // elide the skipped middle columns
            {
                fprintf(stderr, "...\t");
                j = colRange.skipEnd; // column skipEnd itself IS printed below
            }
            fprintf(stderr, "%.10f\t", us(i, j));
        }
        if (colRange.end < GetNumCols()) // ... at line end
            fprintf(stderr, "...");
        fprintf(stderr, "\n");
    }
    if (rowRange.end < GetNumRows()) // rows were cut off at the bottom
        fprintf(stderr, "...\n");
}
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
    // Print the whole matrix; bounds of the ranged overload are inclusive.
    const ptrdiff_t lastRow = (ptrdiff_t) GetNumRows() - 1;
    const ptrdiff_t lastCol = (ptrdiff_t) GetNumCols() - 1;
    Print(matrixName, 0, lastRow, 0, lastCol);
}
// file I/O
//matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
    // Deserialization is not supported for CPUMatrix; always throws.
    RuntimeError("not implemented.");
}
//matrixName is used to verify that correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
    // Serialization is not supported for CPUMatrix; always throws.
    RuntimeError("not implemented.");
}
// assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
                                                                       const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
                                                                       const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
                                                                       const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
                                                                       const bool zeroPadding)
{
    // Repacks each input sample into a
    // [kernelWidth*kernelHeight*inputChannels] x [outputWidth*outputHeight]
    // layout: every input element is scattered to all packed positions whose
    // convolution window covers it, so convolution becomes a matrix product.
    if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
        LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
    const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
    const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
    const size_t inputDim = inputWidth * inputHeight * inputChannels;
    const size_t smallBatchSize = inputSubBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
    RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
    if (zeroPadding)
        SetValue((ElemType) 0); // padded positions keep this zero (never written below)
    const long halfKernelWidth = (long) kernelWidth / 2;
    const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
    for (long sample = 0; sample < smallBatchSize; sample++)
    {
        for (long id = 0; id < inputDim; id++)
        {
            // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
            // IN_ELEM_COLPOS = sample
            const long y = id / inputHeightTimesChannel;   // inputCol
            const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
            const long x = nXC / (long) inputChannels;     // inputRow
            const long c = nXC % (long) inputChannels;     // channel
            // (x0, y0): first window position whose kernel covers (x, y);
            // (x1, y1): position of (x, y) inside that first kernel window.
            long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
            if (zeroPadding)
            {
                x0 = (long) max((ElemType) 0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
                x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample);                                                                 // first posxInKernel
                y0 = (long) max((ElemType) 0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample);                                                                // first posyInKernel
            }
            else
            {
                x0 = (long) max((ElemType) 0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
                x1 = (long) (x - x0 * verticalSubsample);                                                              // first posxInKernel
                y0 = (long) max((ElemType) 0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y - y0 * horizontalSubsample);                                                             // first posyInKernel
            }
            assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
            // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
            // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
            ElemType currentInputValue = inputSubBatch(id, sample);
            long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
            // Scatter this input value into every packed slot whose window covers it;
            // the kernel position decreases by the stride as the window advances.
            for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
            {
                long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
                for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
                {
                    const long packRow = packRowBase + posxInKernel;
                    const long packCol = packColBase + wrow;
                    (*this)(packRow, packCol) = currentInputValue;
                }
                packColBase += (long) outputHeight;
            }
        }
    }
    return *this;
}
// assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
                                                                 const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
                                                                 const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
                                                                 const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
                                                                 const bool zeroPadding) const
{
    // Inverse (accumulating) operation of AssignPackedConvolutionInput: for each
    // input element, sums up all packed values (*this) that correspond to it and
    // ADDS the total into inputSubBatch(id, sample) on top of its current value.
    if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
        LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
    const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
    const size_t inputDim = inputWidth * inputHeight * inputChannels;
    const size_t smallBatchSize = inputSubBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
    const long halfKernelWidth = (long) kernelWidth / 2;
    const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
    for (long sample = 0; sample < smallBatchSize; sample++)
    {
        for (long id = 0; id < inputDim; id++)
        {
            // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
            // IN_ELEM_COLPOS = sample
            const long y = id / inputHeightTimesChannel;   // inputCol
            const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
            const long x = nXC / (long) inputChannels;     // inputRow
            const long c = nXC % (long) inputChannels;     // channel
            // (x0, y0): first window position whose kernel covers (x, y);
            // (x1, y1): position of (x, y) inside that first kernel window.
            long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
            if (zeroPadding)
            {
                x0 = (long) max((ElemType) 0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
                x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample);                                                                 // first posxInKernel
                y0 = (long) max((ElemType) 0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample);                                                                // first posyInKernel
            }
            else
            {
                x0 = (long) max((ElemType) 0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
                x1 = (long) (x - x0 * verticalSubsample);                                                              // first posxInKernel
                y0 = (long) max((ElemType) 0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
                y1 = (long) (y - y0 * horizontalSubsample);                                                             // first posyInKernel
            }
            assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
            // PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
            // PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
            // Start from the current value so the gathered sum ACCUMULATES into inputSubBatch.
            ElemType currentInputValue = inputSubBatch(id, sample);
            long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
            for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
            {
                long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
                for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
                {
                    const long packRow = packRowBase + posxInKernel;
                    const long packCol = packColBase + wrow;
                    currentInputValue += (*this)(packRow, packCol);
                }
                packColBase += (long) outputHeight;
            }
            inputSubBatch(id, sample) = currentInputValue;
        }
    }
    return inputSubBatch;
}
// assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                 const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                 const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                 const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    // Max pooling over [windowWidth x windowHeight] windows with the given
    // strides; one output value per (channel, window position, sample).
    // Changes vs. the previous version: the unused 'minVal' accumulator (a
    // std::min computed for every window element and then discarded) is removed,
    // the -FLT_MAX sentinel is replaced by the ElemType-specific lowest value
    // (correct for double matrices too), and loop bounds are cast to silence
    // signed/unsigned comparisons.
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    RequireSize(outputSizePerSample, batchSize);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < (long) batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < (long) outputSizePerSample; outputIndexWithinSample++)
        {
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel
            ElemType maxVal = std::numeric_limits<ElemType>::lowest();
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < (long) windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < (long) windowHeight; rowInWindow++)
                {
                    maxVal = std::max(maxVal, inputBatch(rowInInput, sample));
                    rowInInput += (long) channels; // next row of the same channel
                }
            }
            (*this)(outputIndexWithinSample, sample) = maxVal;
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
                                                                const size_t channels,
                                                                const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
                                                                const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
                                                                const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    // Backward pass of max pooling: iterates over INPUT positions, finds every
    // pooling window covering the position, and adds that window's output
    // gradient whenever the input value equals the pooled output. Note that on
    // ties every matching input position receives the full gradient.
    // Parallelizing over samples is safe: each sample only writes its own column.
    size_t batchSize = inputBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
        {
            const long y = inputIndexWithinSample / inputHeightTimesChannel;   // col in input
            const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
            const long x = (long) (nXC / channels);                            // row in input
            const long c = (long) (nXC % channels);                            // channel
            // range of window positions whose window covers input position (x, y)
            long startOutX = (long) max((ElemType) 0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample));   // inclusive start
            long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1);       // inclusive end
            long startOutY = (long) max((ElemType) 0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample));  // inclusive start
            long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1);     // inclusive end
            ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
            for (long outY = startOutY; outY <= endOutY; outY++)
            {
                for (long outX = startOutX; outX <= endOutX; outX++)
                {
                    long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
                    // this input was the max of that window -> it receives the gradient
                    if (inputValue == outputBatch(outputIndex, sample))
                        (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
                }
            }
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
                                                                     const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
                                                                     const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
                                                                     const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    // Average pooling over [windowWidth x windowHeight] windows with the given
    // strides; one output value per (channel, window position, sample).
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const size_t batchSize = inputBatch.GetNumCols();
    const size_t windowSize = windowWidth * windowHeight; // divisor for the mean
    RequireSize(outputSizePerSample, batchSize);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
        {
            const long y = outputIndexWithinSample / outputHeightTimesChannel;   // wcol
            const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
            const long x = (long) (nXC / channels);                              // wrow
            const long c = (long) (nXC % channels);                              // channel
            ElemType sum = 0;
            // top-left input row index of this pooling window (for channel c)
            const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
            for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
            {
                long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
                for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
                {
                    sum += inputBatch(rowInInput, sample);
                    rowInInput += (long) channels; // next row of the same channel
                }
            }
            (*this)(outputIndexWithinSample, sample) = sum / windowSize;
        }
    }
    return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
                                                                    const size_t channels,
                                                                    const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
                                                                    const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
                                                                    const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
    // Backward pass of average pooling: each input position accumulates the
    // gradient of every window covering it, divided by the window size.
    // Parallelizing over samples is safe: each sample only writes its own column.
    size_t batchSize = outputGradientBatch.GetNumCols();
    const long inputHeightTimesChannel = (long) (inputHeight * channels);
    const long outputHeightTimesChannel = (long) (outputHeight * channels);
    const long windowSize = (long) (windowWidth * windowHeight);
    // IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
    // IN_ELEM_COLPOS = sample
    // OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
    // OUT_ELEM_COLPOS = sample
#pragma omp parallel for
    for (long sample = 0; sample < batchSize; sample++)
    {
        for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
        {
            const long y = inputIndexWithinSample / inputHeightTimesChannel;   // col in input
            const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
            const long x = nXC / (long) channels;                              // row in input
            const long c = nXC % (long) channels;                              // channel
            // range of window positions whose window covers input position (x, y)
            long startOutX = (long) max((ElemType) 0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample));        // inclusive start
            long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1);     // inclusive end
            long startOutY = (long) max((ElemType) 0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample));       // inclusive start
            long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1);          // inclusive end
            for (long outY = startOutY; outY <= endOutY; outY++)
            {
                for (long outX = startOutX; outX <= endOutX; outX++)
                {
                    long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
                    (*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
                }
            }
        }
    }
    return *this;
}
#pragma endregion Other Helper Functions
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                             const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    // For every output cell, walk the run of kernel taps described by
    // mpRowRun/runs and accumulate kernel weight * input value.
    // Run layout per output row: [skip][size][size offsets][size mask flags].
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            const int colBase = mpRowCol(row, 0); // base input row for this output row
            const int ivBase = mpRowIwht(row, 0); // base offset into the kernel weights
            assert(0 <= colBase && colBase < GetNumRows());
            int cursor = mpRowRun(row, 0);
            const int skip = runs(cursor++, 0);
            const int count = runs(cursor++, 0);
            const int maskBase = cursor + count; // mask entries follow the offset entries
            ElemType sum = 0;
            for (int i = 0; i < count; i++)
            {
                if (runs(maskBase + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                const int dcol = runs(cursor + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
            }
            output(row, sample) = sum;
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
    // Backprop w.r.t. the convolution input: scatter each output-cell gradient
    // (*this) into 'grad', weighted by the corresponding kernel taps.
    // Parallelizing over samples is safe: each sample writes its own grad column.
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            const int colBase = mpRowCol(row, 0); // base row in grad for this output row
            const int ivBase = mpRowIwht(row, 0); // base offset into the kernel weights
            assert(0 <= colBase && colBase < grad.GetNumRows());
            const ElemType curGrad = (*this)(row, sample);
            int cursor = mpRowRun(row, 0);
            const int skip = runs(cursor++, 0);
            const int count = runs(cursor++, 0);
            const int maskBase = cursor + count; // mask entries follow the offset entries
            for (int i = 0; i < count; i++)
            {
                if (runs(maskBase + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                const int dcol = runs(cursor + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
                                                    const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
    // Backprop w.r.t. the kernel weights: accumulates outputGradient * inputValue
    // into kernelGrad for every active tap.
    // Do NOT parallelize these loops! All samples and output rows accumulate into
    // the SAME kernelGrad entries, so a parallel-for would race on the '+=' below.
    for (size_t sample = 0; sample < GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0); // base input row for this output row
            int ivBase = mpRowIwht(row, 0); // base offset into the kernel weights
            assert(0 <= colBase && colBase < in.GetNumRows());
            ElemType curGrad = (*this)(row, sample);
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0);
            int size = runs(i0++, 0);
            int imask = i0 + size; // mask entries follow the offset entries
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
                kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                 const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    // Unrolls the input (*this) into 'output' so convolution can be computed as a
    // single matrix product: for each (output position, sample), the covered input
    // values are written into a row segment of width 'unrollCols'.
    // Uses the same run encoding as ConvolutionForward:
    // [skip][size][size offsets][size mask flags].
    size_t batchSize = GetNumCols();
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0); // base input row for this output position
            assert(0 <= colBase && colBase < GetNumRows());
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0);
            int size = runs(i0++, 0);
            int imask = i0 + size; // mask entries follow the offset entries
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                int dcol = runs(i0 + i, 0); // relative input row offset
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    // Unrolls the output-side tensor (*this) for the kernel-gradient computation:
    // values for all output maps are scattered into 'output' at positions derived
    // from the run encoding (see ConvolutionForward for the runs layout).
    if (mpRowCol.GetNumRows() % mapOutCount != 0)
        InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
    size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount; // output positions per map
    size_t batchSize = GetNumCols();
    size_t kernelSize = runs(1, 0); // total tap count, stored in the run header
    if (kernelSize % mapInCount != 0)
        InvalidArgument("kernelSize must be multiple of mapInCount.");
    size_t kernelMapSize = kernelSize / mapInCount; // taps per input map
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0);
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0);
            int size = runs(i0++, 0);
            int imask = i0 + size; // mask entries follow the offset entries
            for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
            {
                if (runs(imask + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                int dcol = runs(i0 + i, 0);
                size_t isrc = row;
                size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
                // copy this position's value for every output map (maps are
                // laid out mapOutSize apart in *this, contiguously in output)
                for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
                {
                    assert(isrc < GetNumElements());
                    assert(idst + outMap < output.GetNumElements());
                    output.Data()[idst + outMap] = (*this)(isrc, sample);
                }
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
                                                                  const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
    // Unrolls the input (*this) for the kernel-gradient matrix product; unlike
    // UnrollConvolutionInput, destination rows are indexed by kernel tap
    // (skip + i) and columns by (output position, sample).
    // Uses the ConvolutionForward run encoding: [skip][size][offsets][mask flags].
    size_t batchSize = GetNumCols();
    size_t unrollCols = mapOutSize * batchSize; // width of one tap row in 'output'
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
    {
        for (size_t row = 0; row < mapOutSize; row++)
        {
            int colBase = mpRowCol(row, 0); // base input row for this output position
            assert(0 <= colBase && colBase < GetNumRows());
            int i0 = mpRowRun(row, 0);
            int skip = runs(i0++, 0);
            int size = runs(i0++, 0);
            int imask = i0 + size; // mask entries follow the offset entries
            for (int i = 0; i < size; i++)
            {
                if (runs(imask + i, 0) == 0) // tap disabled by its mask entry
                    continue;
                int dcol = runs(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
                assert(idst < output.GetNumElements());
                output.Data()[idst] = (*this)(colBase + dcol, sample);
            }
        }
    }
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
    // For each output cell, takes the max over the input positions listed in
    // 'indices' (first entry of each run is the count, followed by offsets).
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t) output.GetNumCols(); sample++)
    {
        for (size_t row = 0; row < output.GetNumRows(); row++)
        {
            const int colBase = mpRowCol(row, 0); // base input row for this output cell
            assert(0 <= colBase && colBase < GetNumRows());
            assert(std::numeric_limits<ElemType>::has_infinity);
            int cursor = mpRowIndices(row, 0);
            const int count = indices(cursor++, 0);
            assert(count > 0);
            ElemType best = -std::numeric_limits<ElemType>::infinity();
            for (int i = 0; i < count; i++)
            {
                const int dcol = indices(cursor + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
                best = std::max(best, (*this)(colBase + dcol, sample));
            }
            output(row, sample) = best;
        }
    }
}
// Max-pooling backward pass: routes each pooled gradient back to an input
// element that produced the pooled maximum.
// this: incoming gradient, one value per pooled output cell.
// out:  pooled outputs from the forward pass (the per-window maxima).
// in:   original pooling input.
// mpRowCol / mpRowIndices / indices: lookup tables mapping each output row to
//       its input window (base column + count-prefixed offsets).
// grad: gradient w.r.t. the pooling input; accumulated into.
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
                                             const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
                                             CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < grad.GetNumRows());

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            assert(size > 0);
            ElemType g = (*this)(row, sample);
            ElemType m = out(row, sample);
            for (int i = 0; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
                // Send the whole gradient to the first window element matching
                // the forward max, then stop: ties credit only the first match.
                if (in(colBase + dcol, sample) >= m)
                {
                    // NOTE(review): parallelism is over samples (columns), and each
                    // thread writes its own column; the atomic looks defensive
                    // rather than required -- confirm before removing.
#pragma omp atomic
                    grad(colBase + dcol, sample) += g;
                    break;
                }
            }
        }
    }
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
// ROI max-pooling forward pass (see the block comment above for the tensor
// layouts). For each image and each ROI, pools the ROI window of the feature
// map down to pooledWidth x pooledHeight per channel, recording per-channel
// argmax positions for the backward pass.
// Fixes vs. the previous version:
//  - ROI element indices are computed in integer arithmetic; they were routed
//    through (ElemType) casts, which loses exactness once an index exceeds the
//    float mantissa.
//  - The nested `#pragma omp parallel for` on the ROI and spatial loops sat
//    inside an active parallel region; with nested parallelism disabled (the
//    OpenMP default) they created one-thread teams, i.e. they were no-ops.
//  - The running-max sentinel is numeric_limits<ElemType>::lowest() instead of
//    -FLT_MAX, which needlessly truncated the range for double matrices.
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                            const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
                                            CPUMatrix<ElemType>& argmax) const
{
    // stride of one ROI's pooled block within a column of output/argmax
    size_t roiOutputSize = pooledHeight * pooledWidth * channels;

#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int)numImg; imgIdx++)
    {
        auto img = ColumnSlice(imgIdx, 1);
        auto rois = roiData.ColumnSlice(imgIdx, 1);
        for (int roiIdx = 0; roiIdx < (int)numRois; roiIdx++)
        {
            // each ROI is 4 elements: (x, y, w, h), stored as fractions of the
            // original image size.
            size_t base = (size_t)roiIdx * 4;
            ElemType scX = rois(base + 0, 0);
            ElemType scY = rois(base + 1, 0);
            ElemType scW = rois(base + 2, 0);
            ElemType scH = rois(base + 3, 0);

            // compute actual spatial location of the ROI in our featuremap.
            size_t x = (size_t)round(scX * width);
            size_t y = (size_t)round(scY * height);
            ElemType roiW = (ElemType)max(round(scW * width), (ElemType)1);
            ElemType roiH = (ElemType)max(round(scH * height), (ElemType)1);

            const ElemType winW = roiW / (ElemType)pooledWidth;
            const ElemType winH = roiH / (ElemType)pooledHeight;

            // inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
            // loop over spatial locations in output.
            for (int outw = 0; outw < (int)pooledWidth; outw++)
            {
                for (int outh = 0; outh < (int)pooledHeight; outh++)
                {
                    // top-left corner of the input window for this output unit
                    size_t hstart = (size_t)floor(outh * winH);
                    size_t wstart = (size_t)floor(outw * winW);
                    // bottom-right corner (exclusive)
                    size_t hend = (size_t)ceil((outh + 1) * winH);
                    size_t wend = (size_t)ceil((outw + 1) * winW);

                    // offset the window by the ROI top-left corner and clamp to
                    // the feature map (indices are unsigned, so only the upper
                    // bound needs clamping).
                    hstart = min(hstart + y, height);
                    wstart = min(wstart + x, width);
                    hend = min(hend + y, height);
                    wend = min(wend + x, width);

                    bool isempty = (hend <= hstart) || (wend <= wstart);

                    for (size_t c = 0; c < channels; c++)
                    {
                        // [W x H x C x R x N]; R = ROIs per image
                        size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
                        size_t maxidx = 0;
                        ElemType maxval = isempty ? (ElemType)0 : std::numeric_limits<ElemType>::lowest();
                        size_t baseIdx = c * height * width;

                        for (size_t h = hstart; h < hend; h++)
                        {
                            for (size_t w = wstart; w < wend; w++)
                            {
                                // stored argmax indices are relative to the current channel.
                                size_t dataIdx = w + h * width;
                                if (img(baseIdx + dataIdx, 0) > maxval)
                                {
                                    maxval = img(baseIdx + dataIdx, 0);
                                    maxidx = dataIdx;
                                }
                            }
                        }
                        output(outputIdx, imgIdx) = maxval;
                        argmax(outputIdx, imgIdx) = (ElemType)maxidx;
                    }
                }
            }
        }
    }
}
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
// ROI max-pooling backward pass (see the block comment above): for every input
// pixel, scan the ROIs of its image that contain it; an ROI contributes its
// pooled gradient wherever the recorded argmax equals this pixel.
// BUG FIX: the vertical loop formerly ran `h < width`; the vertical extent of
// the feature map is `height`, so tall non-square maps lost gradient rows (and
// wide ones read out of range).
// Also removed: nested `#pragma omp parallel for` on the w/h loops -- inside an
// active parallel region with nested parallelism disabled (OpenMP default)
// they created one-thread teams and had no effect.
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
                                             const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
                                             CPUMatrix<ElemType>& argmax) const
{
    // loop over images in the batch.
#pragma omp parallel for
    for (int imgIdx = 0; imgIdx < (int)numImg; imgIdx++)
    {
        // ROIs for this image. length 4*numRois;
        auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
        // gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
        auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
        auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();

        // loop over spatial locations in the image.
        for (int w = 0; w < (int)width; w++)
        {
            for (int h = 0; h < (int)height; h++)
            {
                // loop over the ROIs seeing which ones contain this location.
                for (int roiN = 0; roiN < (int)numRois; roiN++)
                {
                    // each ROI is 4 elements: (x, y, w, h).
                    int roiOffset = roiN * 4;

                    // ROI data is relative to original image size
                    size_t roiStartW = (size_t)round(rois[roiOffset + 0] * width);
                    size_t roiStartH = (size_t)round(rois[roiOffset + 1] * height);
                    size_t roiWidth = max((size_t)round(rois[roiOffset + 2] * width), (size_t)1);
                    size_t roiHeight = max((size_t)round(rois[roiOffset + 3] * height), (size_t)1);

                    // skip this ROI if it doesn't contain the current input location.
                    const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
                                        h >= roiStartH && h < roiStartH + roiHeight);
                    if (!inROI)
                        continue;

                    ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight;
                    ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;

                    // what pooled nodes in the output for this ROI could have pooled this input location?
                    size_t phstart = (size_t)((h - roiStartH) / winH);
                    size_t pwstart = (size_t)((w - roiStartW) / winW);
                    size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
                    size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));

                    phstart = min(phstart, pooledHeight);
                    phend = min(phend, pooledHeight);
                    pwstart = min(pwstart, pooledWidth);
                    pwend = min(pwend, pooledWidth);

                    for (size_t c = 0; c < channels; c++)
                    {
                        ElemType gradient = 0;
                        // [W x H x C x N]
                        size_t index = w + h * width + c * height * width;
                        // go right up to channel c of the current ROI.
                        size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
                        const ElemType* offsetPoolGrad = pooledGrad + offset;
                        const ElemType* offsetArgmax = argmaxCol + offset;
                        for (size_t ph = phstart; ph < phend; ph++)
                        {
                            for (size_t pw = pwstart; pw < pwend; pw++)
                            {
                                if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
                                    gradient += offsetPoolGrad[ph * pooledWidth + pw];
                            }
                        }
                        // NOTE(review): assignment (not +=) means that when several
                        // ROIs contain the same pixel, only the last ROI's
                        // contribution survives. Kept as-is to preserve existing
                        // behavior -- confirm whether accumulation across ROIs
                        // (as in the Caffe reference) is intended.
                        grad(index, imgIdx) = gradient;
                    }
                }
            }
        }
    }
}
// Max-unpooling: writes each pooled value of `this` back to the input position
// that held the window maximum, as recomputed from `poolInput`.
// this:      pooled values (one per output row/sample).
// poolInput: the original input to the pooling operation; scanned here to
//            re-locate each window's argmax.
// mpRowCol / mpRowIndices / indices: lookup tables mapping each row to its
//            input window (base column + count-prefixed offsets).
// input:     destination matrix receiving the unpooled values.
template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
                                       const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
                                       CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
    for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
    {
        for (size_t row = 0; row < GetNumRows(); row++)
        {
            int colBase = mpRowCol(row, 0);
            assert(0 <= colBase && colBase < input.GetNumRows());

            int i0 = mpRowIndices(row, 0);
            int size = indices(i0++, 0);
            assert(size > 0);

            // Re-run the forward argmax over the window: `imax` ends up at the
            // first window element attaining the maximum of poolInput.
            ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
            ElemType prevMax = curMax;
            int imax = 0;
            for (int i = 1; i < size; i++)
            {
                int dcol = indices(i0 + i, 0);
                assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
                curMax = std::max(curMax, poolInput(colBase + dcol, sample));
                if (curMax > prevMax)
                {
                    prevMax = curMax;
                    imax = i;
                }
            }

            int dcol = indices(i0 + imax, 0);
            assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            // scatter the pooled value to the argmax position
            input(colBase + dcol, sample) = (*this)(row, sample);

            // Alternative (disabled): use an externally provided argmax instead
            // of recomputing it from poolInput.
            //int i = (int)poolIn(row, sample);
            //assert(0 <= i && i < size);
            //int dcol = indices(i0 + i, 0);
            //assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
            //input(colBase + dcol, sample) = (*this)(row, sample);
        }
    }
}
// Average-pooling forward pass: each output cell receives the mean of its
// input window. The divisor is the number of actual (non-padding) elements,
// or the full window size indices(0,0) when poolIncludePad is set.
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t col = 0; col < (int64_t)output.GetNumCols(); col++)
    {
        for (size_t outRow = 0; outRow < output.GetNumRows(); outRow++)
        {
            int base = mpRowCol(outRow, 0);
            assert(0 <= base && base < GetNumRows());

            int idx = mpRowIndices(outRow, 0);
            int count = indices(idx++, 0);
            assert(count > 0);

            ElemType total = 0;
            for (int k = 0; k < count; k++)
            {
                int offset = indices(idx + k, 0);
                assert(0 <= base + offset && base + offset < GetNumRows());
                total += (*this)(base + offset, col);
            }

            // indices(0, 0) holds the full (padded) window size
            int divisor = poolIncludePad ? indices(0, 0) : count;
            output(outRow, col) = total / divisor;
        }
    }
}
// Average-pooling backward pass: distributes each pooled gradient evenly over
// the elements of its input window. The divisor matches the forward pass:
// actual element count, or the full window size indices(0,0) with padding.
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad) const
{
#pragma omp parallel for
    for (int64_t col = 0; col < (int64_t)GetNumCols(); col++)
    {
        for (size_t outRow = 0; outRow < GetNumRows(); outRow++)
        {
            int base = mpRowCol(outRow, 0);
            assert(0 <= base && base < grad.GetNumRows());

            int idx = mpRowIndices(outRow, 0);
            int count = indices(idx++, 0);
            int divisor = poolIncludePad ? indices(0, 0) : count;
            assert(divisor > 0);

            // each window element gets an equal share of the gradient
            ElemType share = (*this)(outRow, col) / divisor;
            for (int k = 0; k < count; k++)
            {
                int offset = indices(idx + k, 0);
                assert(0 <= base + offset && base + offset < grad.GetNumRows());
#pragma omp atomic
                grad(base + offset, col) += share;
            }
        }
    }
}
// Inference-only batch normalization:
//   out = scale * (x - runMean) / sqrt(runVariance + epsilon) + bias
// Training mode is not implemented on the CPU, so the call requires
// inferenceOnly == true, expAvgFactor == 0, blendFactor == 1; saveMean and
// saveInvStdDev (training byproducts) are emptied.
// Spatial mode (GetNumRows() != scale rows): one (mean, var, scale, bias)
// entry covers `spatialSize` consecutive rows, i.e. all pixels of one map.
// Fixes: typo in the error message ("matrx"), and signed/unsigned comparisons
// in the OpenMP loop bounds (loop vars must be signed; bounds now cast).
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<ElemType>& scale, const CPUMatrix<ElemType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
                                                    CPUMatrix<ElemType>& runMean, CPUMatrix<ElemType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
                                                    CPUMatrix<ElemType>& saveMean, CPUMatrix<ElemType>& saveInvStdDev) const
{
    if (GetNumRows() % scale.GetNumRows() != 0)
        LogicError("The number of rows of this matrix must be multiple of the number of rows of the scale matrix.");

    if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
        RuntimeError("Batch normalization training on CPU is not yet implemented.");

    saveMean.Resize(0, 0); // only doing inference: these two are not produced
    saveInvStdDev.Resize(0, 0);

    bool spatial = GetNumRows() != scale.GetNumRows();
    if (spatial)
    {
        size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
        for (long icol = 0; icol < (long)out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < (long)out.GetNumRows(); irow++)
            {
                size_t imap = irow / spatialSize; // feature-map index for this row
                ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
                out(irow, icol) = scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0);
            }
        }
    }
    else
    {
        // per-row statistics: one entry per matrix row
#pragma omp parallel for
        for (long icol = 0; icol < (long)out.GetNumCols(); icol++)
        {
            for (long irow = 0; irow < (long)out.GetNumRows(); irow++)
            {
                ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon);
                out(irow, icol) = scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0);
            }
        }
    }
}
// Batch-normalization backward pass: not implemented on the CPU.
// All parameters are marked used to silence warnings; the call fails loudly.
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<ElemType>& scale, double blendFactor,
                                                     const CPUMatrix<ElemType>& saveMean, const CPUMatrix<ElemType>& saveInvStdDev,
                                                     CPUMatrix<ElemType>& scaleGrad, CPUMatrix<ElemType>& biasGrad) const
{
    UNUSED(in);
    UNUSED(grad);
    UNUSED(scale);
    UNUSED(blendFactor);
    UNUSED(saveMean);
    UNUSED(saveInvStdDev);
    UNUSED(scaleGrad);
    UNUSED(biasGrad);
    RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = alpha * op(a) * op(b) + beta * c, dispatching to cblas_{s,d}gemm, or to
// the quantized multiplier when one is supplied (no-transpose only).
// beta == 0 allows resizing c; otherwise c's existing size must match.
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                                 ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
    if (a.IsEmpty() || b.IsEmpty())
        return;

    // m x k = op(a), l x n = op(b); storage is column-major, so the leading
    // dimension of each operand is its row count before transposition.
    int m, n, k, l;
    int lda, ldb, ldc;
    CBLAS_TRANSPOSE mklTransA;
    CBLAS_TRANSPOSE mklTransB;

    if (transposeA)
    {
        m = (int) a.GetNumCols();
        k = (int) a.GetNumRows();
        lda = k;
        mklTransA = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        m = (int) a.GetNumRows();
        k = (int) a.GetNumCols();
        lda = m;
        mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
    }

    if (transposeB)
    {
        l = (int) b.GetNumCols();
        n = (int) b.GetNumRows();
        ldb = n;
        mklTransB = CBLAS_TRANSPOSE::CblasTrans;
    }
    else
    {
        l = (int) b.GetNumRows();
        n = (int) b.GetNumCols();
        ldb = l;
        mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
    }

    assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
    if (k != l)
        InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");

    if (beta == 0)
        c.RequireSize(m, n);
    else
        c.VerifySize(m, n); // Can't resize if beta != 0

    ldc = (int) c.GetNumRows();

    if (pQuantizedMultiplier == nullptr)
    {
        // ElemType is float or double; pick the matching gemm at compile time
        // via the size test (the casts are no-ops for the selected branch).
        if (sizeof(ElemType) == sizeof(double))
        {
            cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
        }
        else
        {
#pragma warning(suppress : 4244)
            cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
        }
    }
    else
    {
        // TODO: support transpose product
        if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
            LogicError("Quantized multiplier currently doesn't support transpose.");

        // NOTE: alpha/beta are ignored on this path -- presumably the
        // quantized multiplier computes plain c = a * b; confirm with callers.
        pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
    }
}
// c = alpha * a(0,0) * b + beta * c, where a must be a 1x1 (scalar) matrix.
// When beta == 0, c is overwritten without reading its prior contents.
template <class ElemType>
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
                                                    ElemType beta, CPUMatrix<ElemType>& c)
{
    if (a.GetNumElements() != 1)
        InvalidArgument("the argument a must be a scalar"); // a is a scalar

    ElemType f = alpha * a.Get00Element();
    if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * f;
    else
#pragma omp parallel for
        foreach_coord (i, j, c)
            c(i, j) = b(i, j) * f + c(i, j) * beta;
}
// c(i,j) = alpha * a(i,j) * v[j] + beta * c(i,j): scales each column j of `a`
// by the j-th element of the vector `v`, then blends into c.
// v must be a vector (a single row or a single column); its storage is
// accessed linearly, so it is assumed to hold one element per column of `a`
// -- that length is not checked here (TODO confirm callers guarantee it).
template <class ElemType>
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
    if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
        InvalidArgument("the argument v must be a vector"); // v is a vector

    if (beta == 0)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());
    else
        c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0

    const ElemType* vd = v.Data();

    if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * vd[j];
    else
#pragma omp parallel for
        foreach_coord(i, j, c)
            c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta;
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
// Full singular value decomposition A = U * SIGMA * VT.
// A:     m x n input (NOTE: gesvd overwrites its input buffer in place).
// SIGMA: min(m,n) x 1, singular values.
// U:     m x m left singular vectors; VT: n x n right singular vectors (transposed).
// W:     caller-provided scratch; resized here to the queried workspace size
//        (MKL path only; the LAPACKE path manages its own workspace).
// Raises a runtime error if the iteration fails to converge (info > 0).
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
    if (A.IsEmpty())
        LogicError("SVD: input matrix is empty.");

    int info;
    int m, n, lda, ldu, ldvt;
    m = (int) A.GetNumRows();
    n = (int) A.GetNumCols();
    W.GetNumRows(); // W is used as temp working memory (no-op read; W is resized below)
    lda = m;
    ldu = m;
    ldvt = n;
    U.RequireSize(m, m);
    SIGMA.RequireSize(std::min(m, n), 1);
    VT.RequireSize(n, n);

    if (sizeof(ElemType) == sizeof(double))
    {
#ifdef USE_MKL
        // first call with lwork = -1 is a workspace-size query; wkopt receives
        // the optimal size, then the real decomposition runs with W as scratch
        double wkopt;
        int lwork = -1;
        dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
        lwork = (int) wkopt;
        W.RequireSize(lwork, 1);
        dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, reinterpret_cast<double*>(W.Data()), &lwork, &info);
#else
        // LAPACKE convenience wrapper: superb receives the unconverged
        // superdiagonal elements if the algorithm does not fully converge
        std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
                              reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
    }
    else
    {
#ifdef USE_MKL
        // single-precision variant of the same query-then-compute sequence
        float wkopt;
        int lwork = -1;
        sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
        lwork = (int) wkopt;
        W.RequireSize(lwork, 1);
        sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, reinterpret_cast<float*>(W.Data()), &lwork, &info);
#else
        std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
        info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
                              reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
    }

    if (info > 0)
    {
        RuntimeError("The algorithm computing SVD failed to converge.\n");
    }
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c += op(a) * op(b): accumulating product, delegated with alpha = 1 and
// beta = 1 so the existing contents of c are kept.
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                         CPUMatrix<ElemType>& c)
{
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c);
}
// Sums the softmax entries selected by the label indices held in row 0 of this
// matrix (one label per column/instance) and stores the negated total in c(0,0).
template <class ElemType>
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
    size_t numInstances = GetNumCols();
    ElemType total = 0.0;
#pragma omp parallel for reduction(+ : total)
    for (int inst = 0; inst < (int)numInstances; inst++)
    {
        int label = (int) (*this)(0, inst);
        total += softmax(inst, label);
    }
    c(0, 0) = -total;
}
// Unnormalized NCE evaluation: for each instance, scores the stored sample as
// bias[sample] + <b[:, sample], a[:, instance]> and writes the negated sum of
// scores to c(0,0).
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
                                                    const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    ElemType log_likelihood = 0.0;
    size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
    {
        // sample index is negated on read -- presumably the samples are stored
        // negated in row 0 of this matrix; TODO confirm against callers
        int sample = -(int) (*this)(0, instance_id);
        ElemType score = bias(sample, 0);
        // dot product of the sample's embedding column with the hidden vector
        for (int dim = 0; dim < b.GetNumRows(); dim++)
            score += b(dim, sample) * a(dim, instance_id);
        log_likelihood += score;
    }
    c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
// NCE gradient dispatch. Rows of `this` alternate (sample index, probability),
// giving sample_size = rows/2 samples per instance; `tmp` holds the per-sample
// softmax-style factors computed in the forward pass.
// inputIndex == 1: gradient w.r.t. the hidden layer a, written into c(:, instance).
// inputIndex == 2: gradient w.r.t. the embedding b, written into c(:, sample);
//                  writes are partitioned by (sample % i_blocks) so that no two
//                  threads touch the same sample column.
// inputIndex == 3: gradient w.r.t. the bias, c(0, sample) (serial).
// Returns *this unchanged (result goes to c).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    if (inputIndex == 1)
    {
#pragma omp parallel for
        for (int instance_id = 0; instance_id < batch_size; instance_id++)
            for (int sample_id = 0; sample_id < sample_size; sample_id++)
            {
                int sample = (int) (*this)(2 * sample_id, instance_id);
                for (int dim = 0; dim < b.GetNumRows(); dim++)
                    c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
            }
    }
    else if (inputIndex == 2)
    {
        // NOTE(review): omp_get_num_threads() outside a parallel region returns 1,
        // so i_blocks is effectively 16 here -- presumably intended as a fixed
        // block count; confirm.
        int i_blocks = omp_get_num_threads() * 16;
        // Assume only one block in k direction.
        // We don't need to explicitly block in the j direction.
#pragma omp parallel for
        for (int ib = 0; ib < i_blocks; ib++)
            for (int instance_id = 0; instance_id < batch_size; instance_id++)
                for (int sample_id = 0; sample_id < sample_size; sample_id++)
                {
                    int sample = (int) (*this)(2 * sample_id, instance_id);
                    // each thread owns the samples of its block, avoiding
                    // write conflicts on c(:, sample)
                    if (sample % i_blocks == ib)
                        for (int dim = 0; dim < b.GetNumRows(); dim++)
                            c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
                }
    }
    else if (inputIndex == 3)
    {
        // Assume only one block in k direction.
        // We don't need to explicitly block in the j direction.
        for (int instance_id = 0; instance_id < batch_size; instance_id++)
            for (int sample_id = 0; sample_id < sample_size; sample_id++)
            {
                int sample = (int) (*this)(2 * sample_id, instance_id);
                c(0, sample) -= tmp(sample_id, instance_id);
            }
    }
    else
        InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
    return *this;
}
// Noise-contrastive estimation forward pass.
// Rows of `this` alternate (sample index, probability); sample_id == 0 is the
// true sample, the remaining sample_size-1 entries are noise samples.
// For each (instance, sample): score = bias + <a, b[:, sample]>, compared via
// LogAdd against the noise score log(k) + log(prob). tmp receives the gradient
// factor (-P(sample|data), plus 1 for the true sample); c(0,0) receives the
// negated total log-likelihood.
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
                                                           const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
    double log_likelihood = 0.0;
    size_t sample_size = GetNumRows() / 2;
    size_t batch_size = GetNumCols();
    size_t num_noise_samples = sample_size - 1;
    double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
    for (int instance_id = 0; instance_id < batch_size; instance_id++)
        for (int sample_id = 0; sample_id < sample_size; sample_id++)
        {
            int sample = (int) (*this)(2 * sample_id, instance_id);
            double score = bias(0, sample);
            for (int dim = 0; dim < b.GetNumRows(); dim++)
                score += a(dim, instance_id) * b(dim, sample);

            // stored log-probabilities appear negated for noise samples and
            // positive for the true sample (sample_id == 0) -- TODO confirm
            double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
            if (sample_id == 0)
                sample_prob = -sample_prob;
            double score_noise = log_num_noise_samples + sample_prob;

            // z normalizes between model score and noise score in log space
            double z = LogAdd(score, score_noise);
            double logprob = score - z;
            double logprob_noise = score_noise - z;
            tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
            if (sample_id == 0)
                tmp(sample_id, instance_id) += 1;
            log_likelihood += sample_id == 0 ? logprob : logprob_noise;
        }
    c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = op(a) * op(b): plain product, delegated with alpha = 1 and beta = 0 so
// c is overwritten (and may be resized).
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
                                   CPUMatrix<ElemType>& c)
{
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = a * b (no transposition): delegates with alpha = 1, beta = 0.
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add it to all elements of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c += alpha * a, with broadcasting depending on the shape of a:
//  - a full matrix (same shape as c): one flat axpy over all elements;
//  - a 1x1 scalar: added to every element of c;
//  - a column vector: added to every column of c (axpy per column);
//  - a row vector: added to every row of c (strided axpy per row).
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty() || c.IsEmpty())
        LogicError("ScaleAndAdd: one of the input matrices is empty.");

    if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
    {
        // full-matrix case: both matrices are dense column-major buffers of
        // the same shape, so a single axpy over m*n elements suffices
        const int m = (int) a.GetNumRows();
        const int n = (int) a.GetNumCols();
        const int len = m * n;
        const int incx = 1;
        const int incy = 1;

        assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
        if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
            InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");

        if (sizeof(ElemType) == sizeof(double))
        {
            cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
        }
        else
        {
#pragma warning(suppress : 4244)
            cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
        }
    }
    else if (a.GetNumElements() == 1) // scalar, add to all elements
    {
        ElemType v = alpha * a(0, 0);
        long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
        for (long j = 0; j < n; j++)
        {
            // four-way unrolling
            for (long i = 0; i < (m & ~3); i += 4)
            {
                c(i, j) += v;
                c(i + 1, j) += v;
                c(i + 2, j) += v;
                c(i + 3, j) += v;
            }
            // handle remaining stuffs
            for (long i = m & ~3; i < m; i++)
            {
                c(i, j) += v;
            }
        }
    }
    else if (a.GetNumCols() == 1) // col vector, add it to all columns
    {
        int m = (int) c.GetNumRows();
        if (m != (int) a.GetNumRows())
            InvalidArgument("To add column vector, rows should match.");

        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
                // columns are contiguous: unit stride on both operands
                cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
            }
        }
    }
    else // row vector, add it to all rows
    {
        int m = (int) c.GetNumRows();
        int n = (int) c.GetNumCols();
        if (n != (int) a.GetNumCols())
            InvalidArgument("To add row vector, cols should match.");

        ElemType* aBufPtr = a.Data();
        ElemType* cBufPtr = c.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                // row i of column-major c has stride m (the row count)
                cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
            }
        }
    }
}
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c += alpha * (a - b), elementwise; a, b, and c must already share one shape.
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
          a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()))
    {
        InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
    }

    if (a.IsEmpty())
        LogicError("AddScaledDifference: Input matrix a is empty.");

    ElemType* pa = a.Data();
    ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    long count = (long) c.GetNumElements();

    // Every element is independent, so a flat parallel loop suffices; the
    // per-element arithmetic matches the previous hand-unrolled version.
#pragma omp parallel for
    for (long i = 0; i < count; i++)
        pc[i] += alpha * (pa[i] - pb[i]);
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
// c = alpha * (a - b), elementwise; a and b must share one shape. c is resized
// to match unless it aliases a or b (in which case the shapes already agree).
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
    {
        InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
    }

    if (a.IsEmpty())
        LogicError("AssignScaledDifference: Input matrix a is empty.");

    if (&c != &a && &c != &b)
        c.RequireSize(a.GetNumRows(), a.GetNumCols());

    ElemType* pa = a.Data();
    ElemType* pb = b.Data();
    ElemType* pc = c.Data();
    long count = (long) c.GetNumElements();

    // elements are independent; flat parallel loop replaces the unrolled one
#pragma omp parallel for
    for (long i = 0; i < count; i++)
        pc[i] = alpha * (pa[i] - pb[i]);
}
// c[ci,cj] += a[ai,aj]
// c[ci,cj] = beta * c[ci,cj] + a[ai,aj]; both index pairs are range-checked.
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
    if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
        ci >= c.GetNumRows() || cj >= c.GetNumCols())
        InvalidArgument("AddElementToElement: index out of range.");

    // Skip the multiply entirely when beta == 0 so a NaN already present in
    // c(ci, cj) cannot propagate into the result.
    ElemType scaled = (beta != 0) ? beta * c(ci, cj) : (ElemType) 0;
    c(ci, cj) = scaled + a(ai, aj);
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha(0,0) * (a-b)</summary>
/// a, b, c must have the same dimensions.
/// <param name="alpha">1X1 matrix holding the scale factor</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (alpha.GetNumElements() != 1)
        InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
    // unwrap the scalar and delegate to the scalar overload
    const ElemType scalarAlpha = alpha(0, 0);
    AddScaledDifference(scalarAlpha, a, b, c);
}
/// <summary>c = alpha(0,0) * (a-b)</summary>
/// a, b must have the same dimensions.
/// <param name="alpha">1X1 matrix holding the scale factor</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    if (alpha.GetNumElements() != 1)
        InvalidArgument("AssignScaledDifference: alpha must be a 1X1 matrix."); // fixed message: previously said "AddScaledDifference"
    // delegate to the scalar overload
    AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");

    const int numRows = (int) a.GetNumRows();
    const int numCols = (int) a.GetNumCols();
    assert(numRows > 0 && numCols > 0); // converting from size_t to int may cause overflow
    c.RequireSize(numRows, numCols);

    ElemType* src = a.Data();
    ElemType* dst = c.Data();

    // scaling by zero: just clear the destination, no need to read the source
    if (alpha == 0)
    {
        memset(dst, 0, sizeof(ElemType) * c.GetNumElements());
        return;
    }

    const long count = (long) c.GetNumElements();
    // four elements per iteration, distributed across threads
#pragma omp parallel for
    for (long k = 0; k < (count & ~3); k += 4)
    {
        dst[k] = alpha * src[k];
        dst[k + 1] = alpha * src[k + 1];
        dst[k + 2] = alpha * src[k + 2];
        dst[k + 3] = alpha * src[k + 3];
    }
    // tail that does not fill a group of four
    for (long k = count & ~3; k < count; k++)
    {
        dst[k] = alpha * src[k];
    }
}
/// <summary>In-place matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Matrix, overwritten with the scaled result</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");

    const int numRows = (int) a.GetNumRows();
    const int numCols = (int) a.GetNumCols();
    const int count = numRows * numCols;
    const int stride = 1; // the buffer is contiguous
    assert(numRows > 0 && numCols > 0 && count > 0); // converting from size_t to int may cause overflow

    if (alpha == 0 && stride == 1)
    {
        // scaling a contiguous buffer by zero is just a clear
        memset(a.Data(), 0, sizeof(ElemType) * count);
    }
    else if (sizeof(ElemType) == sizeof(double))
    {
        cblas_dscal(count, alpha, reinterpret_cast<double*>(a.Data()), stride);
    }
    else
    {
#pragma warning(suppress : 4244)
        cblas_sscal(count, alpha, reinterpret_cast<float*>(a.Data()), stride);
    }
}
/// <summary>In-place scale by a 1x1 matrix: a = alpha(0,0) * a</summary>
/// <param name="alpha">1x1 matrix holding the scale factor</param>
/// <param name="a">Matrix, overwritten with the scaled result</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
    if (a.IsEmpty())
        LogicError("Scale: Input matrix a is empty.");
    if (alpha.GetNumElements() != 1)
        LogicError("Matrix alpha must be 1x1");
    // unwrap the scalar and delegate to the scalar overload
    const ElemType scaleValue = alpha(0, 0);
    CPUMatrix<ElemType>::Scale(scaleValue, a);
}
// Computes inner products of corresponding columns (isColWise == true) or rows of a and b.
// a and b must have identical dimensions. c becomes a 1 x n row vector (column-wise)
// or an m x 1 column vector (row-wise). If the vectors being reduced have length 1,
// the operation degenerates to an element-wise product.
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");

    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();

    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");

    if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
    {
        c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise: one dot product per column
    {
        c.RequireSize(1, n);

        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        // runtime dispatch to the matching BLAS precision (ElemType is float or double)
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
                // columns are contiguous in the col-major layout, hence stride 1
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_column (j, c)
            {
#pragma warning(suppress : 4244)
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
        }
    }
    else // row-wise: one dot product per row
    {
        c.RequireSize(m, 1);

        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                // row elements are m apart in the col-major buffer, hence stride m
                c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
    }
}
// Treats both matrices as flat vectors and returns vec(a)^T * vec(b).
// a and b must have identical dimensions.
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProductOfMatrices: one of the input matrices is empty.");

    const int aRows = (int) a.GetNumRows();
    const int aCols = (int) a.GetNumCols();
    const int bRows = (int) b.GetNumRows();
    const int bCols = (int) b.GetNumCols();
    assert(aRows > 0 && aCols > 0 && bRows > 0 && bCols > 0); // converting from size_t to int may cause overflow
    if (aRows != bRows || aCols != bCols)
        InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");

    // one BLAS dot over the whole contiguous buffer, in the matching precision
    const int count = (int) a.GetNumElements();
    if (sizeof(ElemType) == sizeof(double))
    {
        return (ElemType) cblas_ddot(count, reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
    }
    else
    {
#pragma warning(suppress : 4244)
        return (ElemType) cblas_sdot(count, reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
    }
}
// c(i,j) = a(i,j) ^ alpha, element-wise. c is resized to match a.
// alpha == 2 and alpha == 3 are special-cased to plain multiplies, which are
// far cheaper than the general pow() call.
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
    if (a.IsEmpty())
        LogicError("ElementWisePower: The input matrix a is empty."); // fixed message: previously said "Scale:"

    c.RequireSize(a.GetNumRows(), a.GetNumCols());

    if (alpha == 2)
    {
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = a(i, j) * a(i, j);
        }
    }
    else if (alpha == 3)
    {
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = a(i, j) * a(i, j) * a(i, j);
        }
    }
    else
    {
        // general (and slower) path for arbitrary exponents
#pragma omp parallel for
        foreach_coord (i, j, c)
        {
            c(i, j) = pow(a(i, j), alpha);
        }
    }
}
// Returns true iff a and b have the same shape and every pair of corresponding
// elements differs by at most 'threshold' in absolute value.
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        return false;

    bool result = true;
    // NOTE(review): 'result' is written by multiple threads without synchronization;
    // this is benign in practice because every write stores the same value (false).
    // The 'break' presumably exits only the innermost loop of the foreach_coord
    // expansion (an early-out, not a full abort) -- confirm against the macro definition.
#pragma omp parallel for
    foreach_coord (i, j, a)
    {
        if (abs(a(i, j) - b(i, j)) > threshold)
        {
            result = false;
            break;
        }
    }

    return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
// Computes c = keepWeight * b + scaleFactor * shuffle(a), where shuffle views a as a
// (D x S x M x K x T) tensor and swaps the S and K axes, producing a (D x K x M x S x T)
// layout. b and c are addressed with the shuffled index nb; a with the linear index na.
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
    size_t N = D * S * M * K * T; // total element count of the 5D tensor
    const auto pa = a.Data();
    const auto pb = b.Data();
    auto pc = c.Data();
    // Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
    for (size_t na = 0; na < N; na++) // loop over all elements
    {
        // recover the 5 indices from the loop counter (column-major order: d fastest, t slowest)
        size_t d = na % D;
        size_t s = (na / D) % S;
        size_t m = (na / D / S) % M;
        size_t k = (na / D / S / M) % K;
        size_t t = (na / D / S / M / K) % T;
        // compute index for the a and b/c tensors
        assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
        size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
        assert(nb < N);
        // perform the computation
        ElemType cval = keepWeight ? keepWeight * pb[nb] : 0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
        cval += scaleFactor * pa[na];
        pc[nb] = cval;
    }
}
// Returns a rows x cols matrix with every element set to 1.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
    CPUMatrix<ElemType> result(rows, cols); // constructor zero-initializes
    result.SetValue(1);
    return result;
}
// Returns a rows x cols matrix with every element set to 0.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
    CPUMatrix<ElemType> result(rows, cols); // constructor zero-initializes
    result.SetValue(0);                     // explicit clear kept to match the original behavior
    return result;
}
// Returns a rows x rows identity matrix.
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
    CPUMatrix<ElemType> result(rows, rows); // constructor zero-initializes
    result.SetDiagonalValue(1);
    return result;
}
// Returns a rows x cols matrix filled via SetUniformRandomValue(low, high, seed).
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
    CPUMatrix<ElemType> result(rows, cols); // constructor zero-initializes
    result.SetUniformRandomValue(low, high, seed);
    return result;
}
// Returns a rows x cols matrix filled via SetGaussianRandomValue(mean, sigma, seed).
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
    CPUMatrix<ElemType> result(rows, cols); // constructor zero-initializes
    result.SetGaussianRandomValue(mean, sigma, seed);
    return result;
}
// Returns true iff 'mat' contains at least one element equal to v.
// Special values match semantically: NaN matches NaN, and an infinity matches
// only an infinity of the same sign.
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
    bool bHas = false;

    bool isvFinite = std::isfinite(v); // hoisted: v never changes inside the loop
    // NOTE(review): the loop compares signed 'j' against size_t GetNumElements();
    // fine below 2^31 elements -- confirm larger matrices never reach this path.
#pragma omp parallel for
    for (long j = 0; j < mat.GetNumElements(); j++)
    {
        // re-read the shared flag so threads stop scanning once any thread found a match
#pragma omp flush(bHas)
        if (!bHas)
        {
            ElemType cur = mat.Data()[j];
            if (isvFinite && std::isfinite(cur))
            {
                if (cur == v)
                    bHas = true;
            }
            else if (std::isnan(v) && std::isnan(cur)) // NaN == NaN is false, so compare via isnan
                bHas = true;
            else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
                bHas = true;
        }
    }

    return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two row vectors
// the output is a matrix of size(neg+1, col)
//[this] = a .* b with circular shifts, for negative sampling.
// a and b must be row vectors of identical width n. The result has negnumber+1 rows:
// row 0 is the plain element-wise product, and row i (1 <= i <= negnumber) pairs
// a(0,j) with b shifted circularly by (shift + i - 1) columns.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
    if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");

    auto& us = *this;
    if (this != &a)
    {
        RequireSize(negnumber + 1, a.GetNumCols());
    }

    const long numRows = (long) GetNumRows();
    const long numCols = (long) GetNumCols(); // a and b are of size (1, numCols)

    // row 0: aligned element-wise product
    for (long j = 0; j < numCols; j++)
    {
        us(0, j) = a(0, j) * b(0, j);
    }
    // rows 1..negnumber: product against the circularly shifted b
    for (long j = 0; j < numCols; j++)
    {
        for (long i = 1; i < numRows; i++)
        {
            us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % numCols);
        }
    }
    return *this;
}
// Column-wise inner products with circular shifts, used for negative sampling:
// row 0 of c holds <a_j, b_j> for every column j, and row i (1 <= i <= negnumber)
// holds <a_j, b_{(j + shift + i - 1) mod n}>. Row-wise mode is not implemented.
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("InnerProduct: one of the input matrices is empty.");

    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();

    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != k || n != l)
        InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");

    if ((isColWise && m == 1) || !isColWise && n == 1) // vectors would degenerate to an element-wise product, which is not supported here
    {
        InvalidArgument("InnerProduct: Both matrices should be normal ones, not vectors");
        // c.AssignElementProductOf(a, b);
    }
    else if (isColWise) // col-wise
    {
        c.RequireSize(negnumber + 1, n); // one row for the aligned products plus one per negative sample

        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        // dispatch to the matching BLAS precision (ElemType is float or double)
        if (sizeof(ElemType) == sizeof(double))
        {
            // row 0: aligned column dot products
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            // rows 1..negnumber: column j of a against the circularly shifted column of b
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
        else
        {
            for (long j = 0; j < n; j++)
            {
                c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
            }
            for (long j = 0; j < n; j++)
            {
                for (long i = 1; i < negnumber + 1; i++)
                {
                    c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
                }
            }
        }
    }
    else
    {
        InvalidArgument("InnerProduct: Rowwise is not supported yet");

        // NOTE(review): everything below is unreachable if InvalidArgument throws (it appears to);
        // kept as-is pending confirmation
        c.RequireSize(m, 1);

        ElemType* aBufPtr = a.Data();
        ElemType* bBufPtr = b.Data();
        if (sizeof(ElemType) == sizeof(double))
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
                c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
            }
        }
        else
        {
#pragma omp parallel for
            foreach_row (i, c)
            {
#pragma warning(suppress : 4244)
                c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
            }
        }
    }
}
// Copies row 'index' of matrix a into this matrix (as a 1 x n row vector).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
    if (a.IsEmpty())
        LogicError("GetARowByIndex: the input matrices is empty.");

    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();

    // 'index' is unsigned, so only the upper bound needs checking; the previous
    // 'index < 0' test was tautologically false, and compare against the size_t
    // row count to avoid a signed/unsigned mismatch
    if (index >= a.GetNumRows())
        LogicError("GetARowByIndex: the row index is out of range.");

    assert(m > 0 && n > 0); // converting from size_t to int may cause overflow

    auto& us = *this;
    RequireSize(1, n);
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(index, j);
    }

    return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
// c(i,j) = a(0, shifted j) * b(i,j) or a(0,j) * b(i, shifted j), depending on
// bFirstmatrixfixed; see the example block above. a must be a 1 x l row vector
// and b a k x l matrix; c is resized to b's dimensions.
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("ConductRowElementMultiplyWithShift: one of the input matrices is empty."); // fixed message: previously said "InnerProduct:"

    const int m = (int) a.GetNumRows();
    const int n = (int) a.GetNumCols();
    const int k = (int) b.GetNumRows();
    const int l = (int) b.GetNumCols();

    assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
    if (m != 1 || n != l)
        InvalidArgument("ConductRowElementMultiplyWithShift: a must be a row vector with the same number of columns as b."); // fixed message: previously said "InnerProduct:"

    c.RequireSize(k, l); // c must be the same size as b

    if (bFirstmatrixfixed)
    {
        // keep a's column order; shift b's columns circularly
        for (long j = 0; j < l; j++)
        {
            for (long i = 0; i < k; i++)
            {
                c(i, j) = a(0, j) * b(i, (j + shift) % l);
            }
        }
    }
    else
    {
        // keep b's column order; shift a's columns circularly
        for (long j = 0; j < l; j++)
        {
            for (long i = 0; i < k; i++)
            {
                c(i, j) = a(0, (j + shift) % l) * b(i, j);
            }
        }
    }
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
//[this] = a .* shifted(b)
// a and b must be row vectors of the same width n; the result is a row vector with
// us(0,j) = a(0,j) * b(0, (j + shift) mod n).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
    // fixed messages below: they previously named the sibling AssignElementProductOfWithShiftNeg
    if (a.IsEmpty() || b.IsEmpty())
        LogicError("AssignElementProductOfWithShift: Matrix is empty.");
    if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
        InvalidArgument("AssignElementProductOfWithShift: The input matrix dimensions do not match.");
    if (a.GetNumRows() != 1)
        InvalidArgument("AssignElementProductOfWithShift: The input matrix must be a row vector.");

    auto& us = *this;
    if (this != &a)
    {
        RequireSize(1, a.GetNumCols());
    }

    long n = (long) GetNumCols(); // a and b are of size (1,n)
#pragma omp parallel for
    for (long j = 0; j < n; j++)
    {
        us(0, j) = a(0, j) * b(0, (j + shift) % n);
    }
    return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd
// Forwards to the templated LogAdd so that callers can accumulate log-space sums
// in double precision regardless of ElemType (avoids premature rounding in long sums).
inline double LogAddD(double x, double y)
{
    return LogAdd(x, y);
}
// Returns log(sum_i exp(element_i)) over all elements, accumulated in log space
// via LogAddD (double precision) to limit rounding error.
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
    ElemType fAlpha = (ElemType) LZERO; // log-space zero
    ElemType* bufPtr = Data();
    // size_t index avoids the previous signed/unsigned comparison and the int
    // overflow for matrices with more than 2^31 elements
    const size_t numElements = GetNumElements();
    for (size_t k = 0; k < numElements; k++)
        fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
    return fAlpha;
}
// Backward pass of RCRF (CRF) inference: fills beta from alpha and the pair scores.
// beta is resized to (iNumLab x iNumPos) and filled column by column from the last
// position backwards; the per-cell work is done by _rcrfBackwardCompute.
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& pair_scores)
{
    int iNumPos = (int) lbls.GetNumCols();
    int iNumLab = (int) lbls.GetNumRows();

    // note: the original code scanned lbls here for the last position's label, but the
    // result was never used, so that dead computation (which also read column
    // iNumPos-1 without guarding against an empty lbls) has been removed

    beta.RequireSize(iNumLab, iNumPos);

    for (int t = iNumPos - 1; t >= 0; t--)
    {
#pragma omp parallel for
        for (int k = 0; k < iNumLab; k++)
        {
            _rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
        }
    }
}
// Calculate alpha in forward-backward calculation. equation (6), (7) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the max number of phones between utterances
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows to have shorter delay during inference.
// Alpha and Beta scores outside of the delay boundary are set to zero.
// Setting this parameter smaller will result in shorter delay between label output during decoding.
// delayConstraint=-1 means no constraint
// Forward (alpha) recursion of CTC at time step t, for every utterance in the
// minibatch: fills the frame-t entries of alphaScore per equations (6), (7) of
// ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf. See the parameter glossary in the
// comment block above.
template<class ElemType>
void _assignAlphaScore(
    const ElemType *prob,
    ElemType *alphaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    size_t numChannels,
    const size_t uttNum,
    const size_t t,
    const size_t maxPhoneNum, // Maximum length of utterance in this MB
    const size_t totalPhoneNum, // Total number of phones
    const size_t blankTokenId,
    const int delayConstraint)
{
    for (size_t uttId = 0;uttId < uttNum;uttId++) {

        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        if (t >= frameNum) continue; // this utterance has already ended at time t
        size_t phoneNum = uttPhoneNum[uttId];

        // NOTE(review): phoneNum is size_t, so 'phoneSeqId < phoneNum - 1' wraps around
        // when phoneNum == 0 -- confirm callers guarantee phoneNum >= 2
#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            // Index of the label in the sequence
            // Current and previous phone indices in phoneSeq matrix
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;

            // Actual current phone label
            size_t phoneId = (size_t)(phoneSeq[labelid]);

            // Index of the current frame in minibatch
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];

            // Index of probability of observing phoneId at frame timeId
            size_t probId = timeId*totalPhoneNum + phoneId;

            size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)

            if (t == 0)
            {
                // Initialize recursion: paths may start at the leading blank (s=1) or the first label (s=2)
                if (phoneSeqId == 1 || phoneSeqId == 2)
                {
                    alphaScore[alphaId] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1)
                    size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
                    size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1)
                    size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2)
                    ElemType x = LZERO; // log-space accumulator over allowed predecessors
                    ElemType ascore;
                    if (phoneSeqId > 2)
                    {
                        size_t labelid_2 = labelid - 2;
                        // skip-transition from s-2 is allowed only if current label is not blank
                        // and not equal to the previous non-blank label
                        if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
                        {
                            x = LogAdd(x, alphaScore[alphaId_2]);
                        }
                    }
                    if (phoneSeqId > 1)
                    {
                        x = LogAdd(x, alphaScore[alphaId_1]);
                    }
                    x = LogAdd(x, alphaScore[alphaId_0]);

                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId]; // Probability of observing given label at given time
                    else
                        ascore = 0; // padding entry contributes nothing

                    alphaScore[alphaId] = (ElemType)x + ascore;

                    // optionally zero out alpha outside the allowed output-delay window
                    if (delayConstraint != -1)
                    {
                        size_t labelid_r = labelid + 2;
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
                        if (phoneId == blankTokenId)
                        {
                            // only constraint right side
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                alphaScore[alphaId] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                alphaScore[alphaId] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate beta in forward-backward calculation, equation (10), (11) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
// Backward (beta) recursion of CTC at time step t, for every utterance in the
// minibatch: fills the frame-t entries of betaScore per equations (10), (11) of
// ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf. Mirrors _assignAlphaScore but walks
// successors (s, s+1, s+2) at time t+1 instead of predecessors.
template<class ElemType>
void _assignBetaScore(
    const ElemType *prob,
    ElemType *betaScore,
    ElemType *phoneSeq,
    ElemType *phoneBound,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttFrameNum,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const size_t numChannels,
    const size_t uttNum,
    const long t,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum,
    const size_t blankTokenId,
    const int delayConstraint)
{
    for (size_t uttId = 0;uttId < uttNum;uttId++) {

        // Number of phones and frames in this utterance
        size_t frameNum = uttFrameNum[uttId];
        if (t >= frameNum) continue; // this utterance has already ended at time t
        size_t phoneNum = uttPhoneNum[uttId];

        // NOTE(review): same size_t wraparound hazard as in _assignAlphaScore when phoneNum < 2
#pragma omp parallel for
        for (int phoneSeqId = 1;phoneSeqId < phoneNum - 1;phoneSeqId++) {
            size_t labelid = uttId*maxPhoneNum + phoneSeqId;     // position s in this utterance's label sequence
            size_t labelid_2 = labelid + 2;                      // position s+2 (next non-blank candidate)
            size_t phoneId = (LONG64)(phoneSeq[labelid]);        // label at position s
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId]; // minibatch column of frame t
            size_t probId = timeId*totalPhoneNum + phoneId;      // posterior of phoneId at frame t
            size_t betaid = maxPhoneNum* timeId + phoneSeqId;    // beta_t(s)
            size_t timeId_1 = timeId + numChannels;              // minibatch column of frame t+1
            size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId; // beta_{t+1}(s)
            size_t betaid_1 = betaid_0 + 1;                      // beta_{t+1}(s+1)
            size_t betaid_2 = betaid_0 + 2;                      // beta_{t+1}(s+2)

            if (t == frameNum - 1)
            {
                // Initialize recursion: paths may end at the last label or the trailing blank
                if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
                {
                    betaScore[betaid] = prob[probId];
                }
            }
            else
            {
                if (phoneSeqId >= 1)
                {
                    ElemType x = LZERO; // log-space accumulator over allowed successors
                    ElemType ascore;
                    if (phoneSeqId < phoneNum - 3)
                    {
                        // skip-transition to s+2 is allowed only if current label is not blank
                        // and differs from the label at s+2
                        if (phoneSeq[labelid] != blankTokenId && phoneId != phoneSeq[labelid_2])
                        {
                            x = LogAdd(x, betaScore[betaid_2]);
                        }
                    }

                    if (phoneSeqId < phoneNum - 2)
                    {
                        x = LogAdd(x, betaScore[betaid_1]);
                    }

                    x = LogAdd(x, betaScore[betaid_0]);

                    if (phoneId != SIZE_MAX)
                        ascore = prob[probId]; // posterior of observing this label at frame t
                    else
                        ascore = 0; // padding entry contributes nothing

                    betaScore[betaid] = (ElemType)x + ascore;

                    // optionally zero out beta outside the allowed output-delay window
                    if (delayConstraint != -1)
                    {
                        size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
                        if (phoneId == blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint - 1)
                                betaScore[betaid] = LZERO;
                        }
                        else if (phoneId != blankTokenId)
                        {
                            if (t > phoneBoundId_r + delayConstraint)
                                betaScore[betaid] = LZERO;
                        }
                    }
                }
            }
        }
    }
}
// Calculate CTC score. equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// Calculate total CTC score: equation (8) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// For each utterance, combines beta at the two valid start states (entries 1 and 2 of
// the utterance's first frame) into the total path score, stores it back into
// betaScore[alphaId_0] and into totalScore[uttId].
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
    std::vector<ElemType>& totalScore,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const size_t numChannels,
    const size_t maxPhoneNum)
{
#pragma omp parallel for
    for (int uttId = 0; uttId < (int) uttNum; uttId++)
    {
        // the redundant inner 'if (uttId < uttNum)' (a leftover from the GPU kernel,
        // where the thread count can exceed uttNum) has been removed: the loop bound
        // already guarantees it
        LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;

        betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
        totalScore[uttId] = betaScore[alphaId_0];
    }
}
// Calculate derivative, equation (15) in ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
// See _assignAlphaScore for the explanation of parameters
// Computes the CTC derivative (label occupancy) per equation (15) of
// ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf from the alpha/beta scores,
// writing per-frame posterior occupancies into CTCscore.
// See _assignAlphaScore for the explanation of parameters.
template<class ElemType>
void _assignCTCScore(
    ElemType *CTCscore,
    ElemType *prob,
    ElemType *alphaScore,
    ElemType *betaScore,
    ElemType *phoneSeq,
    const size_t uttNum,
    const std::vector<size_t>& uttToChanInd,
    const std::vector<size_t>& uttBeginFrame,
    const std::vector<size_t>& uttPhoneNum,
    const std::vector<size_t>& uttFrameNum,
    const size_t numChannels,
    const size_t maxPhoneNum,
    const size_t totalPhoneNum)
{
    for (size_t uttId = 0;uttId < uttNum;uttId++) {
#pragma omp parallel for
        for (int t = 0; t < uttFrameNum[uttId]; t++) {
            size_t phoneNum = uttPhoneNum[uttId];
            size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum; // start of this utterance's score block
            size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];                // minibatch column of frame t
            ElemType P_lx = betaScore[alphaId_0]; // total path score log P(l|x), produced by _assignTotalScore

            // accumulate log occupancy over every (non-boundary) label position s
            for (int s = 1; s < phoneNum - 1; s++)
            {
                long phoneId = phoneSeq[uttId*maxPhoneNum + s];
                size_t alphaId = maxPhoneNum* timeId + s;
                size_t probId = timeId*totalPhoneNum + phoneId;

                if (phoneId != SIZE_MAX) // skip padding entries
                {
                    // gamma = alpha * beta / (p * P(l|x)), computed in log space
                    ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
                    CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
                }
            }

            // convert back from log space; anything below the log-zero floor becomes 0
            for (int s = 0; s < totalPhoneNum; s++)
            {
                size_t probId = timeId*totalPhoneNum + s;
                ElemType logoccu = CTCscore[probId];
                if (logoccu < LZERO)
                    CTCscore[probId] = 0.0f;
                else
                    CTCscore[probId] = exp(logoccu);
            }
        }
    }
}
// Computes the CTC score/derivative matrix into 'this' via the forward-backward
// algorithm (Graves et al., ICML 2006). alpha and beta are filled as side effects,
// and totalScore(0,0) receives the negated sum of per-utterance log path scores.
// Only the column-wise minibatch layout is supported.
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
    const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
    const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, CPUMatrix<ElemType> & totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
    const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
    // Column wise representation of sequences in input matrices (each column is one sequence/utterance)
    if (isColWise)
    {
        // Total number of phones
        size_t totalPhoneNum = prob.GetNumRows();
        size_t uttNum = uttFrameNum.size();

        // Max number of phones in utterances in this minibatch
        size_t maxPhoneNum = phoneSeq.GetNumRows();

        // forward pass: fill alpha frame by frame
        for (size_t t = 0; t < maxFrameNum; t++)
        {
            _assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }

        // backward pass: fill beta from the last frame backwards (signed LONG64 so t >= 0 terminates)
        for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
        {
            _assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
                uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
        }

        // per-utterance total path scores (also normalizes beta's start entries)
        std::vector<ElemType> scores(uttNum);
        _assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);

        // per-frame occupancies into 'this'
        _assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
            uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);

        // negative total log path score summed over all utterances
        totalScore(0, 0) = 0.0;
        for (size_t utt = 0; utt < uttNum; utt++)
        {
            totalScore(0,0) -= scores[utt];
        }

        return *this;
    }
    else {
        LogicError("Only ColWise minibatch layout is supported.");
    }

    return *this;
}
/// the kernel function for RCRF backward computation
// Computes one cell beta(k, t) of the CRF backward scores in log space.
// At the last position, beta(k,t) = alpha(k,t) - log sum_j exp(alpha(j,t))
// (normalized alpha). At earlier positions, it accumulates over all successor
// labels j the score beta(j,t+1) + alpha(k,t) + pair_scores(j,k), each
// normalized by the log-partition over predecessors m.
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
                                               CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores)
{
    size_t iNumLab = alpha.GetNumRows();
    size_t iNumPos = alpha.GetNumCols();

    ElemType fSum;
    ElemType fTmp = (ElemType) LZERO; // log-space zero
    if (t == iNumPos - 1)
    {
        // last position: normalize alpha by the log partition over all labels
        fSum = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            fSum = (ElemType) LogAddD(fSum, alpha(j, t));
        }

        fTmp = alpha(k, t) - fSum;
        beta(k, t) = fTmp;
    }
    else
    {
        // interior position: accumulate over all successor labels j
        for (int j = 0; j < iNumLab; j++)
        {
            // log partition over predecessors m for successor j
            fSum = (ElemType) LZERO;
            for (int m = 0; m < iNumLab; m++)
            {
                fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
            }

            fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
        }
        beta(k, t) = fTmp;
    }
}
// Computes the gradient of the CRF transition scores into grd: for every position,
// the expected (model) transition counts are added via _rcrfTransGrdCompute, then
// the observed (ground-truth) transition count is subtracted.
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
                                              const CPUMatrix<ElemType>& alpha,
                                              const CPUMatrix<ElemType>& beta,
                                              const CPUMatrix<ElemType>& pair_scores,
                                              CPUMatrix<ElemType>& grd)
{
    int iNumPos = (int) alpha.GetNumCols();
    int iNumLab = (int) alpha.GetNumRows();

    // the ground-truth label at position 0 is the row with a nonzero entry
    int firstLbl = -1;
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }

    for (size_t tPos = 0; tPos < iNumPos; tPos++)
    {
        // NOTE(review): these column slices are never read in this function
        // (_rcrfTransGrdCompute re-slices internally); they look like dead locals
        CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
        CPUMatrix<ElemType> a;
        if (tPos > 0)
            a = alpha.ColumnSlice(tPos - 1, 1);

        // add the expected (model) transition counts for every current label i
#pragma omp parallel for
        for (int i = 0; i < iNumLab; i++)
        {
            _rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
        }

        // transition score
        // subtract the observed transition: previous label i -> current label j
        int i = -1;
        if (tPos == 0)
            i = firstLbl;
        else
        {
            for (int ik = 0; ik < lbls.GetNumRows(); ik++)
                if (lbls(ik, tPos - 1) != 0)
                {
                    i = ik;
                    break;
                }
        }

        int j = -1;
        for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        {
            if (lbls(ik, tPos) != 0)
            {
                j = ik;
                break;
            }
        }

        // NOTE(review): if a label column is all zeros, i or j stays -1 and this
        // indexes out of range -- confirm callers guarantee one-hot label columns
        grd(j, i) -= 1.0;
    }
};
// Accumulates, for previous-label i at position tPos, the model-expected
// transition posteriors exp(log p) into grd(j, i) for every next-label j.
// i           - previous-label index handled by this call
// lbls        - one-hot ground-truth labels (used only to locate the first label)
// alpha/beta  - forward/backward scores in log space
// pair_scores - transition scores, indexed pair_scores(next, prev)
// grd         - (output) gradient accumulator w.r.t. pair_scores
// tPos        - time position
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
                                               const CPUMatrix<ElemType>& lbls,
                                               const CPUMatrix<ElemType>& alpha,
                                               const CPUMatrix<ElemType>& beta,
                                               const CPUMatrix<ElemType>& pair_scores,
                                               CPUMatrix<ElemType>& grd,
                                               const size_t tPos // position
                                               )
{
    int iNumLab = (int) alpha.GetNumRows();

    // locate the label active at the first position
    int firstLbl = -1;
    for (int ik = 0; ik < lbls.GetNumRows(); ik++)
        if (lbls(ik, 0) != 0)
        {
            firstLbl = ik;
            break;
        }

    // b = beta column at tPos; a = alpha column at tPos-1. 'a' stays empty for
    // tPos == 0, where the "previous alpha" is replaced by 0 / LZERO below.
    CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
    CPUMatrix<ElemType> a;
    if (tPos > 0)
        a = alpha.ColumnSlice(tPos - 1, 1);

    {
        ElemType fTmp = (ElemType) LZERO;
        for (int j = 0; j < iNumLab; j++)
        {
            // log numerator: alpha(i, tPos-1) + pair_scores(j, i); at tPos == 0
            // only the first label has mass (log 1 = 0), all others log 0
            if (tPos == 0)
            {
                if (i == firstLbl)
                {
                    fTmp = 0;
                }
                else
                {
                    fTmp = (ElemType) LZERO;
                }
            }
            else
            {
                fTmp = a(i, 0);
            }
            fTmp += pair_scores(j, i);

            // log denominator: log-sum over all previous labels k of the same quantity
            ElemType fSum = (ElemType) LZERO;
            for (int k = 0; k < iNumLab; k++)
            {
                ElemType fTmp2;
                if (tPos == 0)
                {
                    if (k == firstLbl)
                    {
                        fTmp2 = 0;
                    }
                    else
                    {
                        fTmp2 = (ElemType) LZERO;
                    }
                }
                else
                {
                    fTmp2 = a(k, 0);
                }
                fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
            }

            fTmp -= fSum;      // normalize: log transition posterior
            fTmp += b(j, 0);   // multiply in the backward score for next-label j

            // accumulate the expected count (probability) for transition i -> j
            grd(j, i) += exp(fTmp);
        }
    }
};
// Zeroes out (drops) the error signal of each frame (column) whose reference
// label's posterior (gamma) falls below 'threshhold'.
// label      - one-hot ground truth, same layout as 'this'
// gamma      - posterior probabilities, same size as 'this'
// threshhold - minimum posterior for a frame to be kept
// Returns *this.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshhold)
{
    auto& us = *this;
    if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
        LogicError("DropFrame: target matrix is not in the same size as gamm matrix.");

#pragma omp parallel for
    foreach_column (j, label)
    {
        // a frame is dropped when its reference label's posterior is below threshold
        bool dropframe = false;
        foreach_row (i, label)
        {
            if (fabs(label(i, j) - 1.0f) < 0.1)
            {
                if (gamma(i, j) < threshhold)
                    dropframe = true;
                break;
            }
        }

        // BUGFIX: only clear frames flagged for dropping. Previously the zeroing
        // loop ran unconditionally, leaving 'dropframe' dead and wiping the
        // error signal of every frame.
        if (dropframe)
        {
            foreach_row (i, label)
            {
                us(i, j) = 0.0f;
            }
        }
    }

    return *this;
}
// Accumulates the smoothed sequence-training error signal:
//   us += alpha * (label - (1 - hsmoothingWeight) * dnnoutput - hsmoothingWeight * gamma)
// hsmoothingWeight - interpolation weight between the dnnoutput and gamma signals
// label            - ground-truth targets
// dnnoutput        - network outputs (presumably frame-level posteriors — confirm with caller)
// gamma            - presumably sequence-level (lattice) posteriors — confirm with caller
// alpha            - overall scale applied to the accumulated signal
// Returns *this. Note: the expression's evaluation order is kept as-is since
// floating-point summation order affects the result.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
                                                              const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
    auto& us = *this;
    foreach_coord (i, j, us)
        us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
    return *this;
}
// Configures the number of threads used by OpenMP and the linked BLAS library.
// numThreads == 0 : keep defaults (no-op); returns 0.
// numThreads <  0 : use hardware_concurrency + numThreads, but at least 1.
// numThreads >  0 : use that many, clamped to hardware_concurrency.
// Returns the thread count actually in effect.
// note: this function does not depend on the <ElemType> parameter
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
    if (numThreads == 0) // use default
        return numThreads;

    const int hwThreads = (int) std::thread::hardware_concurrency();
    if (numThreads < 0) // negative: relative to the hardware thread count
        numThreads = std::max(1, hwThreads + numThreads);
    numThreads = std::min(numThreads, hwThreads); // never exceed the hardware

#ifdef _OPENMP
    omp_set_num_threads(numThreads);
    numThreads = omp_get_max_threads(); // report what OpenMP actually granted

#ifdef USE_MKL
    mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
    openblas_set_num_threads(numThreads);
#endif
#endif
    return numThreads;
}
// Returns the maximum number of threads OpenMP will use, or the hardware
// concurrency when built without OpenMP.
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
#ifdef _OPENMP
    return omp_get_max_threads();
#else
    return (int) std::thread::hardware_concurrency();
#endif
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
// No-op when built without MKL. Raises a runtime error if MKL rejects the mode.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
#ifdef USE_MKL
    // CBWR = conditional bitwise reproducibility
    if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
        RuntimeError("Could not set MKL compatible mode.");
#endif
}
// =======================================================================
// TensorView support
// =======================================================================
// To save time, this makes extensive use of templates and macros.

// -----------------------------------------------------------------------
// function to compute the value for a given output location (perform reduction if needed)
// -----------------------------------------------------------------------

// perform loop over reduction index m
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m>
struct TensorOpReduction
{
    // reduction case (non-reduction case is specialized)
    // Recursively folds reduction dimension m (then m-1, ...) with reductionOp,
    // aggregating in double for historical numerical compatibility.
    static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
                                const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
    {
        array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
        for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
            strides[i] = reducingStrides[i][(size_t) m];

        // seed with the first slice, then fold in the remaining reducingOpDims[m]-1 slices
        double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
        for (size_t dim = reducingOpDims[(size_t) m] - 1; dim-- > 0;)
        {
            // advance the pointers
            for (size_t i = 0; i < N - 1; i++)
                pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here

            // need to descend into one loop deeper
            aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides));
        }
        // Aggregation happens in double; narrow back to ElemType here so results match
        // the previous implementation. (BUGFIX: the cast previously said
        // static_cast<double>, a no-op that contradicted the declared return type.)
        return static_cast<ElemType>(aggregate);
    }
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1>
{
    // Base case: no reduction dimensions left — evaluate the elementwise op at
    // the current pointer positions and return its value.
    static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
                                const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&)
    {
        return opfn(pointers); // finally we are doing some work!!!
    }
};
// perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices.
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, size_t N, int m>
struct TensorArgOpReduction
{
    // Dispatches the runtime reduction rank (0..3) to the matching compile-time
    // recursion depth and returns (winning value, flat index of the winner).
    static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
                                                        ElementWiseOperator reductionOp)
    {
        size_t counter = 0; // running count of elements visited; yields flat indices
        size_t index = 0;   // flat index of the current best element
        ElemType val = (ElemType) 0;

        switch (reducingOpDims.size())
        {
        case 3:
            val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 2:
            val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 1:
            val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        case 0:
            val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
            break;
        default:
            LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size());
        }

        return make_pair(val, index);
    }

    // reduction case (non-reduction case is specialized)
    // Walks reduction dimension m, recursing into m-1, and tracks the best
    // (argmin/argmax) value seen and its element counter.
    static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
                                ElementWiseOperator reductionOp, size_t& counter, size_t& index)
    {
        array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
        for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
            strides[i] = reducingStrides[i][(size_t)m];

        ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
        for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
        {
            // advance the pointers
            for (size_t i = 0; i < N - 1; i++)
                pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here

            ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);

            // strict comparison keeps the earlier index on ties
            bool update = false;
            switch (reductionOp)
            {
            case ElementWiseOperator::opArgmin:
                update = (aggregate > val);
                break;
            case ElementWiseOperator::opArgmax:
                update = (aggregate < val);
                break;
            }

            if (update)
            {
                aggregate = val;
                // counter was already advanced by the base case; counter - 1 is the
                // last element visited. NOTE(review): with nested reduction dims this
                // assumes the inner winner is the last element visited — verify.
                index = counter - 1;
            }
        }
        return aggregate;
    }
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, size_t N>
struct TensorArgOpReduction<ElemType, N, -1>
{
    // Base case: visit a single element. Advances the element counter and returns
    // the element's value; the caller decides whether it becomes the new winner.
    static inline ElemType Loop(array<ElemType*, N> pointers,
                                const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator reductionOp, size_t& counter, size_t& index)
    {
        counter++;
        return *pointers[0]; // finally we are doing some work!!!
    }
};
// -----------------------------------------------------------------------
// perform loop over regular index k for N-nary operations (N counting the output)
// -----------------------------------------------------------------------
// perform loop over regular index k and reducing index m for N operands (counting the output)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k>
struct TensorOpIteration
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t) k];
for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE.
// This is a very common case, e.g. adding vectors or computing the Sigmoid.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
    // Binary op (2 inputs + 1 output): apply opfn over a contiguous run of K
    // elements, parallelized with OpenMP. Each iteration delegates to the scalar
    // (k = -1) case at offset k.
    static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
                            const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
                            const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
    {
        ElemType* pa = pointers[0];
        ElemType* pb = pointers[1];
        ElemType* pc = pointers[2];
        size_t K = regularOpDims[0];

        // special-case beta and alpha to allow the compiler to short-circuit it;
        // the 0/1 literals below let the scalar case fold the constant away
        if (beta != 0)
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        else if (alpha != 1)
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        else
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        // TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default).
        // TODO: The signedness of k (required for omp) causes an extra sign-extend.
        // TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it?
    }
};
// and unary
// Unary op (1 input + 1 output): same contiguous, OpenMP-parallel innermost loop
// as the binary specialization above, with beta/alpha special-cased.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
    static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
                            const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                            const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
    {
        ElemType* pa = pointers[0];
        ElemType* pb = pointers[1];
        size_t K = regularOpDims[0];

        // special-case beta and alpha to allow the compiler to short-circuit it
        if (beta != 0)
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        else if (alpha != 1)
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        else
#pragma omp parallel for
            for (int k = 0; k < (int) K; k++)
                TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    }
};
// Scalar case (k == -1): all output dimensions consumed; compute one output
// element, applying any remaining reduction, then alpha/beta scaling.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1>
{
    static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
                            const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
                            const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
    {
        // we are at element level for the result: perform the op (there may still be reduction)
        ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
        // scale
        val *= alpha;
        // combine with previous value in target matrix, then write it out
        auto* pout = pointers.back(); // the last pointer is the output
        if (beta != 0)
            val += beta * *pout;
        // save
        *pout = val;
        return;
    }
};
// perform loop over regular index k and reducing index m for N operands (counting the output), the difference
// between TensorOpIteration and TensorArgOpIteration, is that the latter store the index of the result, instead of
// the result. The reason that they aren't combined is because of performance.
template <class ElemType, size_t N, int k>
struct TensorArgOpIteration
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t)k];
for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Scalar case (k == -1): compute the arg-reduction for one output element.
template <class ElemType, size_t N>
struct TensorArgOpIteration<ElemType, N, -1>
{
    static inline void Loop(array<ElemType*, N> pointers,
                            const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
                            const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
    {
        // we are at element level for the result: perform the op (there may still be reduction)
        // ReduceAll itself dispatches on reducingOpDims.size(); the '2' template
        // argument only selects which (equivalent) static ReduceAll is named.
        auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp);

        auto* pout = pointers.back();
        *pout = (ElemType)val.second; // store the winning flat index, not the value
        return;
    }
};
// -----------------------------------------------------------------------
// map runtime parameters N to template parameters
// -----------------------------------------------------------------------

// tensor operation with k+1 dimensions (-1 means scalar)
// Dispatches the runtime reduction rank (0..2) to the compile-time parameter m.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k>
static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp,
                                    const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
                                    const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
    size_t dims = reducingOpDims.size();
    switch (dims)
    {
    case 2:
        return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    case 1:
        return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    case 0:
    {
        // if all leading dimensions are 1, we can let the compiler do some unrolling
        bool leadingAllOne = true;
        for (size_t i = 0; i < N; i++)
            leadingAllOne &= k >= 0 && regularStrides[i][0] == 1; // 'k >= 0' guards the [0] access in the scalar case
        if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions
            return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
        else
            return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    }
    default:
        LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims);
    }
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different k.
// Applies per-operand offsets, then binds the runtime output rank (0..4) to the
// compile-time loop depth k of TensorOpWithRegularLoop.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
                                       const array<size_t, N>& offsets,
                                       const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
                                       const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
    // shift every operand pointer by its offset up front
    for (size_t n = 0; n < N; n++) // unrolled; N is a small constant
        pointers[n] += offsets[n];

    const size_t rank = regularOpDims.size();
    if (rank == 4)
        return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    if (rank == 3)
        return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    if (rank == 2)
        return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    if (rank == 1)
        return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    if (rank == 0)
        return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
    LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)rank);
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different reductionOps
// Maps the runtime reductionOp to a concrete double-valued reduction lambda and
// forwards everything to TensorOpWithFnAndReduction.
template <class ElemType, typename OPFN, size_t N>
static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp,
                           const array<size_t, N>& offsets,
                           const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
                           const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// BUGBUG: Using always 'double' as type of aggregator even for ElemType==float. Reason: otherwise some e2e test would fail as historically we
// used double for aggregator of sum. But:
// * for min and max reductions this is meaningless.
// * It is not consistent with what we do on GPU, there we aggregate on ElemType.
// * It costs performance.
// TODO: adapt e2e tests to run with aggregator of type ElemType.
#define CaseTensorOpWithFnAndReduction(oper)                                                  \
    case ElementWiseOperator::op##oper:                                                       \
        return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \
                                          {                                                   \
                                              return Op##oper(a, b);                          \
                                          },                                                  \
                                          offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)

    switch (reductionOp)
    {
        CaseTensorOpWithFnAndReduction(Sum);
        CaseTensorOpWithFnAndReduction(LogSum);
        CaseTensorOpWithFnAndReduction(Min);
        CaseTensorOpWithFnAndReduction(Max);
        CaseTensorOpWithFnAndReduction(ElementwiseProduct);
    default:
        // typo fixed: "suported" -> "supported"
        LogicError("Specified ElementWiseOperator op %d not supported as reduction operation.", (int)reductionOp);
    }
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; also map op to a lambda
// -----------------------------------------------------------------------

// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
// beta/alpha follow the convention of the scalar iteration case:
// this = beta * this + alpha * reduce(op(a)).
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 2>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
    // only these reductions are implemented for the unary CPU path
    if (reductionOp != ElementWiseOperator::opSum &&
        reductionOp != ElementWiseOperator::opLogSum &&
        reductionOp != ElementWiseOperator::opMin &&
        reductionOp != ElementWiseOperator::opMax &&
        reductionOp != ElementWiseOperator::opElementwiseProduct)
        InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");

// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
#define CaseUnaryTensorOp(oper)                                                        \
    case ElementWiseOperator::op##oper:                                                \
        return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
                              {                                                        \
                                  return Op##oper((*(pp[0])));                         \
                              },                                                       \
                              reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)

    array<ElemType*, 2> pointers = {a.Data(), Data()}; // input first, output last
    switch (op)
    {
        ForAllUnaryOps(CaseUnaryTensorOp);
    default:
        LogicError("TensorOp: Unknown unary op code %d.", (int) op);
    }
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 3>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
    if (reductionOp != ElementWiseOperator::opSum)
        InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum.");

#define CaseBinaryTensorOp(oper)                                                       \
    case ElementWiseOperator::op##oper:                                                \
        return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \
                              {                                                        \
                                  return Op##oper((*(pp[0])), (*(pp[1])));             \
                              },                                                       \
                              reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)

    array<ElemType*, 3> pointers = {a.Data(), b.Data(), Data()}; // inputs first, output last
    switch (op)
    {
        ForAllBinaryOps(CaseBinaryTensorOp);
    default:
        // message reworded for consistency with the unary/ternary overloads
        // (was "Unknown op binary code")
        LogicError("TensorOp: Unknown binary op code %d.", (int) op);
    }
}
// perform ternary operation 'op' on a, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
                                   const array<size_t, 4>& offsets,
                                   const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
                                   const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
    // ternary ops only support plain summation as the reduction
    if (reductionOp != ElementWiseOperator::opSum)
        InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum.");

#define CaseTernaryTensorOp(oper)                                                      \
    case ElementWiseOperator::op##oper:                                                \
        return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \
                              {                                                        \
                                  return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \
                              },                                                       \
                              reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)

    array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), Data()}; // inputs first, output last
    switch (op)
    {
        ForAllTernaryOps(CaseTernaryTensorOp);
    default:
        LogicError("TensorOp: Unknown ternary op code %d.", (int) op);
    }
}
// Returns the linear (flat) index of the smallest element; ties resolve to the
// lowest index. Returns -1 for an empty matrix (or if no element ever compares
// below the numeric_limits max seed, e.g. all-NaN input).
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
    int minArg = -1;
    ElemType minValue = std::numeric_limits<ElemType>::max();

#pragma omp parallel
    {
        // each thread scans its share of the elements and keeps a local best
        int localMinArg = -1;
        ElemType localMinValue = std::numeric_limits<ElemType>::max();

#pragma omp for
        for (int index = 0; index < (int)GetNumElements(); ++index)
        {
            if (localMinValue > Data()[index])
            {
                localMinArg = index;
                localMinValue = Data()[index];
            }
            // If we have more then one min value, select the one with lower index.
            else if ((localMinValue == Data()[index]) && (localMinArg > index))
            {
                localMinArg = index;
            }
        }

#pragma omp critical
        {
            // merge each thread's local winner into the global result
            if (minValue > localMinValue)
            {
                minArg = localMinArg;
                minValue = localMinValue;
            }
            // If we have more then one min value, select the one with lower index.
            else if ((minValue == localMinValue) && (minArg > localMinArg))
            {
                minArg = localMinArg;
            }
        }
    }
    return minArg;
}
// Returns the linear (flat) index of the largest element; ties resolve to the
// lowest index. Returns -1 for an empty matrix.
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
    int maxArg = -1;
    // BUGFIX: was numeric_limits<ElemType>::min(), which for floating-point types
    // is the smallest *positive* value — an all-negative matrix would then never
    // update the running max and -1 would be returned. lowest() is the true
    // minimum representable value.
    ElemType maxValue = std::numeric_limits<ElemType>::lowest();

#pragma omp parallel
    {
        // each thread scans its share of the elements and keeps a local best
        int localMaxArg = -1;
        ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();

#pragma omp for
        for (int index = 0; index < (int)GetNumElements(); ++index)
        {
            if (localMaxValue < Data()[index])
            {
                localMaxArg = index;
                localMaxValue = Data()[index];
            }
            // If we have more than one max value, select the one with lower index.
            else if ((localMaxValue == Data()[index]) && (localMaxArg > index))
            {
                localMaxArg = index;
            }
        }

#pragma omp critical
        {
            // merge each thread's local winner into the global result
            if (maxValue < localMaxValue)
            {
                maxArg = localMaxArg;
                maxValue = localMaxValue;
            }
            // If we have more than one max value, select the one with lower index.
            else if ((maxValue == localMaxValue) && (maxArg > localMaxArg))
            {
                maxArg = localMaxArg;
            }
        }
    }
    return maxArg;
}
// Dispatches an arg-reduction to Argmin or Argmax; any other op raises
// InvalidArgument.
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
    if (reductionOp == ElementWiseOperator::opArgmin)
        return Argmin();
    if (reductionOp == ElementWiseOperator::opArgmax)
        return Argmax();

    InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
    return -1; // unreachable if InvalidArgument throws; kept to satisfy the compiler
}
// Arg-reduction (argmin/argmax) over tensor 'a' into 'this'.
// reductionOp must be opArgmin or opArgmax. When 'this' is a single element,
// the whole input is reduced via ArgOp; otherwise the strided iteration
// machinery computes one winning index per output element.
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
                                      const array<size_t, 2>& offsets,
                                      const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
                                      const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
    if (reductionOp != ElementWiseOperator::opArgmin &&
        reductionOp != ElementWiseOperator::opArgmax)
        InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");

    if (GetNumElements() == 1)
    {
        // full reduction to a scalar: store the flat argmin/argmax of 'a'
        Data()[0] = (ElemType) a.ArgOp(reductionOp);
    }
    else
    {
        const size_t N = 2; // input + output
        array<ElemType*, N> pointers = { a.Data(), Data() };
        for (size_t i = 0; i < N; i++)
            pointers[i] += offsets[i];

        // dispatch the runtime output rank to the compile-time loop depth
        switch (regularOpDims.size())
        {
        case 2:
            TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
            break;
        case 1:
            TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
            break;
        case 0:
            TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
            break;
        default:
            LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size());
        }
    }
}
// Scatter-add: data column indices[i] += alpha * value column i, for each i.
// indices      - destination column indices stored as ElemType; NaN or negative
//                entries are skipped
// value        - source values, 'rows' x num_indices (column-major)
// data         - destination buffer, 'rows' x 'cols' (column-major)
// alpha        - scale applied to the scattered values
// num_indices  - number of source columns
// indices_step - stride between consecutive entries of 'indices'
// Parallelization partitions destination columns by (col % nthread), so no two
// threads ever write the same column — duplicate indices remain safe.
template <class ElemType>
void CPUMatrix<ElemType>::ScatterValues(ElemType* indices, ElemType* value, ElemType* data, ElemType alpha, size_t num_indices, size_t rows, size_t cols, size_t indices_step)
{
    if (!indices || !value || !data)
        LogicError("ScatterValues: input data is null.");

#pragma omp parallel
    {
        const size_t ithread = (size_t) omp_get_thread_num();
        const size_t nthread = (size_t) omp_get_num_threads();
        // BUGFIX: loop counters were 'auto i = 0' (int), mixing signed/unsigned in
        // the bound checks and overflowing for very large inputs; use size_t.
        for (size_t i = 0; i < num_indices; i++)
        {
            auto col_r = indices[i * indices_step];
            if (std::isnan(col_r) || col_r < 0)
                continue; // skip invalid (NaN/negative) indices
            auto col = (size_t) col_r;
            // ignore the elements that are not partitioned into this thread
            if (col % nthread != ithread)
                continue;
            if (col >= cols)
                InvalidArgument("ScatterValues: Indices map out of bounds. %ld >= %ld", (long int) col, (long int) cols);

            auto index = col * rows;  // start of destination column
            auto offset = i * rows;   // start of source column
            for (size_t j = 0; j < rows; j++)
                data[index + j] = data[index + j] + alpha * value[offset + j];
        }
    }
}
// We use Matrix<char> as the backing store for QuantizedMatrix.
// Let's explicitly instantiate the methods we need for that purpose.
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);
// Support <short> — same subset of methods as the <char> instantiation above.
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);
// Support <int> — only the array-wrapping constructor is needed.
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
convolutiondepthwise_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Depthwise 3x3 convolution, stride 1, scalar reference implementation
// (despite the _sse suffix no intrinsics are used).  Each input channel g is
// convolved with its own 3x3 kernel (kernel + g*9) into output channel g,
// plus an optional per-channel bias.  Assumes top_blob is already sized for
// "valid" output (outw == w - 2 style); no padding is applied here.
static void convdw3x3s1_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // channels are independent, so parallelize over groups
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);
        const float bias0 = bias ? bias[g] : 0.f;
        const float* kernel0 = kernel + g*9;
        // two output rows are produced per iteration of the main loop
        float* outptr = out;
        float* outptr2 = outptr + outw;
        const float* img0 = bottom_blob.channel(g);
        // four consecutive input rows feed the two output rows
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* r3 = img0 + w*3;
        const float* k0 = kernel0;       // kernel row 0
        const float* k1 = kernel0 + 3;   // kernel row 1
        const float* k2 = kernel0 + 6;   // kernel row 2
        int i = 0;
        // main loop: compute output rows i and i+1 together, reusing r1/r2 loads
        for (; i+1 < outh; i+=2)
        {
            int remain = outw;
            for (; remain>0; remain--)
            {
                // output row i: rows r0..r2 against kernel rows k0..k2
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                // output row i+1: rows r1..r3 against the same kernel
                float sum2 = bias0;
                sum2 += r1[0] * k0[0];
                sum2 += r1[1] * k0[1];
                sum2 += r1[2] * k0[2];
                sum2 += r2[0] * k1[0];
                sum2 += r2[1] * k1[1];
                sum2 += r2[2] * k1[2];
                sum2 += r3[0] * k2[0];
                sum2 += r3[1] * k2[1];
                sum2 += r3[2] * k2[2];
                *outptr = sum;
                *outptr2 = sum2;
                r0++;
                r1++;
                r2++;
                r3++;
                outptr++;
                outptr2++;
            }
            // skip the 2-column border plus one whole row (two rows consumed)
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;
            // output pointers already advanced by outw; jump over the row
            // written by the partner pointer
            outptr += outw;
            outptr2 += outw;
        }
        // tail: odd remaining output row, processed one row at a time
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain>0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                *outptr = sum;
                r0++;
                r1++;
                r2++;
                outptr++;
            }
            // skip the 2-column right border to reach the next input row
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
// Depthwise 3x3 convolution, stride 2, scalar reference implementation.
// Same layout assumptions as convdw3x3s1_sse: one 3x3 kernel per channel,
// optional per-channel bias, pre-sized "valid" output.
static void convdw3x3s2_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // after consuming a row (2*outw input columns), skip the remainder of
    // that row plus one full row: stride 2 advances the window two rows down
    const int tailstep = w - 2*outw + w;
    const float* kernel = _kernel;
    const float* bias = _bias;
    // channels are independent, so parallelize over groups
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g=0; g<group; g++)
    {
        Mat out = top_blob.channel(g);
        const float bias0 = bias ? bias[g] : 0.f;
        const float* kernel0 = kernel + g*9;
        float* outptr = out;
        const float* img0 = bottom_blob.channel(g);
        // three consecutive input rows feed each output row
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;
        const float* k0 = kernel0;       // kernel row 0
        const float* k1 = kernel0 + 3;   // kernel row 1
        const float* k2 = kernel0 + 6;   // kernel row 2
        int i = 0;
        for (; i < outh; i++)
        {
            int remain = outw;
            for (; remain>0; remain--)
            {
                float sum = bias0;
                sum += r0[0] * k0[0];
                sum += r0[1] * k0[1];
                sum += r0[2] * k0[2];
                sum += r1[0] * k1[0];
                sum += r1[1] * k1[1];
                sum += r1[2] * k1[2];
                sum += r2[0] * k2[0];
                sum += r2[1] * k2[1];
                sum += r2[2] * k2[2];
                *outptr = sum;
                // horizontal stride 2
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }
            // vertical stride 2: jump to the start of the next window row
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
DRB071-targetparallelfor-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
use of omp target: len is not mapped. It should be firstprivate within target.
*/
int main(int argc, char* argv[])
{
    // DataRaceBench DRB071 (race-free case): the scalar 'len' is referenced
    // inside the target region without an explicit map clause, so per the
    // OpenMP spec it is implicitly firstprivate on the target construct.
    // Each iteration touches a distinct a[i], so the parallel for is safe.
    int i;
    int len = 1000;
    int a[len];
    // initialize a[i] = i on the host
    for (i=0; i<len; i++)
        a[i]= i;
    // offload: 'a' is mapped explicitly; 'len' implicitly firstprivate
    #pragma omp target map(a[0:len])
    #pragma omp parallel for
    for (i=0;i< len;i++)
        a[i]=a[i]+1;
    return 0;
}
|
DRB084-threadprivatemissing-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A file-scope variable used within a function called by a parallel region.
No threadprivate is used to avoid data races.
Data race pairs sum0@61:3 vs. sum0@61:8
sum0@61:3 vs. sum0@61:3
*/
#include "omprace.h"
#include <omp.h>
#include <stdio.h>
#include <assert.h>
int sum0=0, sum1=0;
//#pragma omp threadprivate(sum0)
// Accumulates i into the file-scope global sum0.  Called concurrently from
// the worksharing loop in main() with neither threadprivate nor atomic
// protection, so this read-modify-write is the INTENDED data race of this
// benchmark (DRB084) — do not "fix" it.
void foo (int i)
{
    sum0=sum0+i;
}
int main()
{
    // DataRaceBench DRB084 (racy case): foo() updates the shared global sum0
    // from all threads without synchronization; the critical section below
    // only protects the final reduction into 'sum', not the race inside foo.
    omprace_init();
    int i, sum=0;
    #pragma omp parallel
    {
        #pragma omp for
        for (i=1;i<=1000;i++)
        {
            foo (i);  // racy accumulation into sum0
        }
        #pragma omp critical
        {
            sum= sum+sum0;
        }
    }
    /* reference calculation (serial, race-free) */
    for (i=1;i<=1000;i++)
    {
        sum1=sum1+i;
    }
    printf("sum=%d; sum1=%d\n",sum,sum1);
    // assert(sum==sum1);  // would fail nondeterministically due to the race
    omprace_fini();
    return 0;
}
|
GB_unaryop__lnot_int8_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_fp64
// op(A') function: GB_tran__lnot_int8_fp64
// C type: int8_t
// A type: double
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z ; GB_CAST_SIGNED(z,x,8) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the LNOT operator elementwise: Cx [p] = !(cast_int8(Ax [p]) != 0),
// where aij (double) is first cast to int8 via GB_CAST_SIGNED.  Returns
// GrB_NO_VALUE when this kernel is compiled out (GB_DISABLE).
GrB_Info GB_unop__lnot_int8_fp64
(
    int8_t *restrict Cx,
    const double *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // dense arrays: one cast+op per entry, statically scheduled
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = lnot (cast (A')): transpose A, typecast double->int8, and apply LNOT.
// The real work is done by the shared template GB_unaryop_transpose.c, which
// expands using the GB_* macros defined above.
GrB_Info GB_tran__lnot_int8_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cprint.c | // compile with
// /home/rlieberm/rocm/aomp_13.0-2/bin/clang -O2 -target x86_64-pc-linux-gnu -fopenmp -fopenmp-targets=amdgcn-amd-amdhsa -Xopenmp-target=amdgcn-amd-amdhsa -march=gfx906 -c cprint.c -emit-llvm -o cprint.bc -save-temps
// dump the ll
// cp cprint-openmp-amdgcn-amd-amdhsa-gfx906.tmp.ll ~/git/aomp12/aomp-extras/aomp-device-libs/aompextras/src/cprint.ll
#include <stdio.h>
#pragma omp declare target
// Fortran-callable shim: print a NUL-terminated string plus a newline.
void f90print_(char *s) {
  puts(s);
}
// Fortran-callable shim: print a label followed by an int value.
void f90printi_(char *s, int *i) {
  const int value = *i;
  printf("%s %d\n", s, value);
}
// Fortran-callable shim: print a label followed by a long value.
void f90printl_(char *s, long *i) {
  const long value = *i;
  printf("%s %ld\n", s, value);
}
// Fortran-callable shim: print a label followed by a float value (%f format).
void f90printf_(char *s, float *f) {
  const float value = *f;
  printf("%s %f\n", s, value);
}
// Fortran-callable shim: print a label followed by a double value (%g format).
void f90printd_(char *s, double *d) {
  const double value = *d;
  printf("%s %g\n", s, value);
}
#pragma omp end declare target
|
GB_unop__minv_uint32_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_uint32_uint32)
// op(A') function: GB (_unop_tran__minv_uint32_uint32)
// C type: uint32_t
// A type: uint32_t
// cast: uint32_t cij = aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 32)
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 32) ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = aij ; \
Cx [pC] = GB_IMINV_UNSIGNED (z, 32) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply MINV elementwise: Cx [p] = GB_IMINV_UNSIGNED (Ax [p], 32), the
// saturating unsigned-integer "multiplicative inverse".  Cx and Ax may alias.
// Ab, when non-NULL, is the bitmap of A: entries with Ab [p] == 0 are absent
// and skipped.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB (_unop_apply__minv_uint32_uint32)
(
    uint32_t *Cx, // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 32) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            uint32_t z = aij ;
            Cx [p] = GB_IMINV_UNSIGNED (z, 32) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (cast (A')): transpose and apply MINV.  The real work is done by
// the shared template GB_unop_transpose.c, expanded with the GB_* macros
// defined above.
GrB_Info GB (_unop_tran__minv_uint32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
lbfgsbsolver.h | // CppNumericalSolver
// based on:
// L-BFGS-B: A LIMITED MEMORY ALGORITHM FOR BOUND CONSTRAINED OPTIMIZATION
// Richard H. Byrd, Peihuang Lu, Jorge Nocedal and Ciyou Zhu
#include <iostream>
#include <list>
#include <Eigen/LU>
#include "isolver.h"
#include "../boundedproblem.h"
#include "../linesearch/morethuente.h"
#ifndef LBFGSBSOLVER_H
#define LBFGSBSOLVER_H
namespace cppoptlib {
// L-BFGS-B: limited-memory BFGS for bound-constrained minimization (Byrd, Lu,
// Nocedal & Zhu).  Maintains the compact representation of the Hessian model
// B = theta*I - W*M*W^T from the most recent m_historySize (y, s) correction
// pairs.  Each iteration: generalized Cauchy point -> subspace minimization
// over the free variables -> More-Thuente line search -> history update.
template<typename TProblem>
class LbfgsbSolver : public ISolver<TProblem, 1> {
  public:
  using Superclass = ISolver<TProblem, 1>;
  using typename Superclass::Scalar;
  using typename Superclass::TVector;
  using MatrixType = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>;
  using VariableTVector = Eigen::Matrix<Scalar, Eigen::Dynamic, 1>;
  protected:
  // workspace matrices of the compact L-BFGS representation
  MatrixType W, M;
  Scalar theta;           // scaling of the identity part of B
  int DIM;                // problem dimension (set in minimize)
  int m_historySize = 5;  // number of (y, s) correction pairs kept
  /**
   * @brief sort pairs (k,v) according v ascending
   * @details returns the first components (indices) of v, permuted so that
   *          their second components (values) are in ascending order
   *
   * @param v vector of (index, value) pairs
   * @return indices ordered by value ascending
   */
  std::vector<int> sort_indexes(const std::vector< std::pair<int, Scalar> > &v) {
    std::vector<int> idx(v.size());
    for (size_t i = 0; i != idx.size(); ++i)
      idx[i] = v[i].first;
    // NOTE(review): the comparator indexes v with idx values, so it assumes
    // v[i].first == i — this holds for the caller in this class.
    sort(idx.begin(), idx.end(), [&v](size_t i1, size_t i2) {
      return v[i1].second < v[i2].second;
    });
    return idx;
  }
  /**
   * @brief Algorithm CP: Computation of the generalized Cauchy point
   * @details PAGE 8 of the L-BFGS-B paper: walk along the projected steepest
   *          descent path, freezing each variable as it hits its bound, until
   *          the piecewise-quadratic model attains its minimum.
   *
   * @param x_cauchy output: the generalized Cauchy point
   * @param c output: W^T * (x_cauchy - x), reused by SubspaceMinimization
   */
  void getGeneralizedCauchyPoint(const TProblem &problem, const TVector &x, const TVector &g, TVector &x_cauchy, VariableTVector &c) {
    const int DIM = x.rows();  // NOTE(review): intentionally shadows the member DIM
    // Given x,l,u,g, and B = \theta I-WMW
    // {all t_i} = { (idx,value), ... }
    // TODO: use "std::set" ?
    std::vector<std::pair<int, Scalar> > SetOfT;
    // the feasible set is implicitly given by "SetOfT - {t_i==0}"
    TVector d = -g;
    // breakpoint t_j along -g for each coordinate (n operations)
    for (int j = 0; j < DIM; j++) {
      if (g(j) == 0) {
        SetOfT.push_back(std::make_pair(j, std::numeric_limits<Scalar>::max()));
      } else {
        Scalar tmp = 0;
        if (g(j) < 0) {
          tmp = (x(j) - problem.upperBound()(j)) / g(j);
        } else {
          tmp = (x(j) - problem.lowerBound()(j)) / g(j);
        }
        SetOfT.push_back(std::make_pair(j, tmp));
        if (tmp == 0) d(j) = 0;
      }
    }
    // sortedindices [1,0,2] means the minimal element is on the 1-st entry
    std::vector<int> sortedIndices = sort_indexes(SetOfT);
    x_cauchy = x;
    // Initialize
    // p := W^Scalar*p
    VariableTVector p = (W.transpose() * d); // (2mn operations)
    // c := 0
    c = VariableTVector::Zero(W.cols());
    // f' := g^Scalar*d = -d^Td
    Scalar f_prime = -d.dot(d); // (n operations)
    // f'' := \theta*d^Scalar*d-d^Scalar*W*M*W^Scalar*d = -\theta*f' - p^Scalar*M*p
    Scalar f_doubleprime = (Scalar)(-1.0 * theta) * f_prime - p.dot(M * p); // (O(m^2) operations)
    // guard against a vanishing or negative curvature estimate
    f_doubleprime = std::max<Scalar>(std::numeric_limits<Scalar>::epsilon(), f_doubleprime);
    Scalar f_dp_orig = f_doubleprime;
    // \delta t_min := -f'/f''
    Scalar dt_min = -f_prime / f_doubleprime;
    // t_old := 0
    Scalar t_old = 0;
    // b := argmin {t_i , t_i >0}
    int i = 0;
    for (int j = 0; j < DIM; j++) {
      i = j;
      if (SetOfT[sortedIndices[j]].second > 0)
        break;
    }
    int b = sortedIndices[i];
    // see below
    // t := min{t_i : i in F}
    Scalar t = SetOfT[b].second;
    // \delta Scalar := t - 0
    Scalar dt = t ;
    // examination of subsequent segments: advance breakpoint by breakpoint
    // until the model minimizer lies within the current segment
    while ((dt_min >= dt) && (i < DIM)) {
      if (d(b) > 0)
        x_cauchy(b) = problem.upperBound()(b);
      else if (d(b) < 0)
        x_cauchy(b) = problem.lowerBound()(b);
      // z_b = x_p^{cp} - x_b
      Scalar zb = x_cauchy(b) - x(b);
      // c := c +\delta t*p
      c += dt * p;
      // cache
      VariableTVector wbt = W.row(b);
      f_prime += dt * f_doubleprime + (Scalar) g(b) * g(b) + (Scalar) theta * g(b) * zb - (Scalar) g(b) *
                 wbt.transpose() * (M * c);
      f_doubleprime += (Scalar) - 1.0 * theta * g(b) * g(b)
                       - (Scalar) 2.0 * (g(b) * (wbt.dot(M * p)))
                       - (Scalar) g(b) * g(b) * wbt.transpose() * (M * wbt);
      // keep curvature bounded away from zero relative to the initial value
      f_doubleprime = std::max<Scalar>(std::numeric_limits<Scalar>::epsilon() * f_dp_orig, f_doubleprime);
      p += g(b) * wbt.transpose();
      d(b) = 0;
      dt_min = -f_prime / f_doubleprime;
      t_old = t;
      ++i;
      if (i < DIM) {
        b = sortedIndices[i];
        t = SetOfT[b].second;
        dt = t - t_old;
      }
    }
    dt_min = std::max<Scalar>(dt_min, (Scalar)0.0);
    t_old += dt_min;
    // remaining (still-free) variables move by the final step along d
    #pragma omp parallel for
    for (int ii = i; ii < x_cauchy.rows(); ii++) {
      x_cauchy(sortedIndices[ii]) = x(sortedIndices[ii]) + t_old * d(sortedIndices[ii]);
    }
    c += dt_min * p;
  }
  /**
   * @brief find alpha* = max {a : a <= 1 and l_i-xc_i <= a*d_i <= u_i-xc_i}
   * @details largest step in direction du that keeps every free variable
   *          within its bounds, capped at 1
   *
   * @param FreeVariables indices of the free (unbounded-at-Cauchy-point) variables
   * @return the feasible step length alpha*
   */
  Scalar findAlpha(const TProblem &problem, TVector &x_cp, VariableTVector &du, std::vector<int> &FreeVariables) {
    Scalar alphastar = 1;
    const unsigned int n = FreeVariables.size();
    assert(du.rows() == n);
    for (unsigned int i = 0; i < n; i++) {
      if (du(i) > 0) {
        alphastar = std::min<Scalar>(alphastar, (problem.upperBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
      } else {
        alphastar = std::min<Scalar>(alphastar, (problem.lowerBound()(FreeVariables[i]) - x_cp(FreeVariables[i])) / du(i));
      }
    }
    return alphastar;
  }
  /**
   * @brief solving unbounded probelm
   * @details direct primal method: minimize the quadratic model over the
   *          variables that are free at the Cauchy point, then clip the step
   *          with findAlpha
   *
   * @param SubspaceMin output: the subspace minimizer
   */
  void SubspaceMinimization(const TProblem &problem, TVector &x_cauchy, TVector &x, VariableTVector &c, TVector &g,
                            TVector &SubspaceMin) {
    Scalar theta_inverse = 1 / theta;
    // collect the variables strictly inside their bounds at the Cauchy point
    std::vector<int> FreeVariablesIndex;
    for (int i = 0; i < x_cauchy.rows(); i++) {
      if ((x_cauchy(i) != problem.upperBound()(i)) && (x_cauchy(i) != problem.lowerBound()(i))) {
        FreeVariablesIndex.push_back(i);
      }
    }
    const int FreeVarCount = FreeVariablesIndex.size();
    // WZ = rows of W restricted to the free variables (Z^T W in the paper)
    MatrixType WZ = MatrixType::Zero(W.cols(), FreeVarCount);
    for (int i = 0; i < FreeVarCount; i++)
      WZ.col(i) = W.row(FreeVariablesIndex[i]);
    // reduced gradient of the model at the Cauchy point
    TVector rr = (g + theta * (x_cauchy - x) - W * (M * c));
    // r=r(FreeVariables);
    MatrixType r = MatrixType::Zero(FreeVarCount, 1);
    for (int i = 0; i < FreeVarCount; i++)
      r.row(i) = rr.row(FreeVariablesIndex[i]);
    // STEP 2: "v = w^T*Z*r" and STEP 3: "v = M*v"
    VariableTVector v = M * (WZ * r);
    // STEP 4: N = 1/theta*W^T*Z*(W^T*Z)^T
    MatrixType N = theta_inverse * WZ * WZ.transpose();
    // N = I - MN
    N = MatrixType::Identity(N.rows(), N.rows()) - M * N;
    // STEP: 5
    // v = N^{-1}*v
    if (v.size() > 0)
      v = N.lu().solve(v);
    // STEP: 6
    // HERE IS A MISTAKE IN THE ORIGINAL PAPER!
    VariableTVector du = -theta_inverse * r - theta_inverse * theta_inverse * WZ.transpose() * v;
    // STEP: 7
    Scalar alpha_star = findAlpha(problem, x_cauchy, du, FreeVariablesIndex);
    // STEP: 8
    VariableTVector dStar = alpha_star * du;
    SubspaceMin = x_cauchy;
    for (int i = 0; i < FreeVarCount; i++) {
      SubspaceMin(FreeVariablesIndex[i]) = SubspaceMin(FreeVariablesIndex[i]) + dStar(i);
    }
  }
  public:
  // Set the number of correction pairs kept (limited-memory history length).
  void setHistorySize(const int hs) { m_historySize = hs; }
  // Minimize problem starting from x0; the solution is written back into x0.
  void minimize(TProblem &problem, TVector &x0) {
    if(!problem.isValid(x0))
      std::cerr << "start with invalid x0" << std::endl;
    DIM = x0.rows();
    theta = 1.0;
    W = MatrixType::Zero(DIM, 0);
    M = MatrixType::Zero(0, 0);
    MatrixType yHistory = MatrixType::Zero(DIM, 0);
    MatrixType sHistory = MatrixType::Zero(DIM, 0);
    TVector x = x0, g = x0;
    Scalar f = problem.value(x);
    problem.gradient(x, g);
    // conv. crit.: infinity norm of the projected gradient
    auto noConvergence =
      [&](TVector &x, TVector &g)->bool {
        return (((x - g).cwiseMax(problem.lowerBound()).cwiseMin(problem.upperBound()) - x).template lpNorm<Eigen::Infinity>() >= 1e-4);
      };
    this->m_current.reset();
    this->m_status = Status::Continue;
    while (problem.callback(this->m_current, x) && noConvergence(x, g) && (this->m_status == Status::Continue)) {
      Scalar f_old = f;
      TVector x_old = x;
      TVector g_old = g;
      // STEP 2: compute the cauchy point
      TVector CauchyPoint = TVector::Zero(DIM);
      VariableTVector c = VariableTVector::Zero(W.cols());
      getGeneralizedCauchyPoint(problem, x, g, CauchyPoint, c);
      // STEP 3: compute a search direction d_k by the primal method for the sub-problem
      TVector SubspaceMin;
      SubspaceMinimization(problem, CauchyPoint, x, c, g, SubspaceMin);
      // STEP 4: perform linesearch and STEP 5: compute gradient
      Scalar alpha_init = 1.0;
      // ORIGINAL
      //const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init);
      // MODIFIED I: variant that also returns f, g, and an exit code in info
      int info = 0;
      const Scalar rate = MoreThuente<TProblem, 1>::linesearch(x, SubspaceMin-x , problem, alpha_init, f, g, info);
      //std::cout << "[ INFO = " << info << " \t x = " << x.transpose() << " ]\n";
      // MODIFIED II
      //TVector s = (SubspaceMin-x).eval();
      //MoreThuente<TProblem, 1>::cvsrch(problem, x, f, g, alpha_init, s);
      //const Scalar rate = alpha_init;
      //MoreThuente<TProblem, 1>::cvsrch(problem, x, f, g, alpha_init, s, f_old);
      //MoreThuente<TProblem, 1>::cvsrch(problem, x, f, g, alpha_init, s, this->m_stop, this->m_current, this->m_status);
      //std::cout << "L-BFGS x = " << (x.array().exp()).matrix().transpose() << std::endl;
      //if ( !(this->m_status == Status::Continue) )
      //  break;
      x = x - rate*(x-SubspaceMin);
      if ( info != 1 )
      {
        // line search did not report normal termination: recompute f and g
        // at the accepted point so the history update stays consistent
        std::cout << "\n[*] WARNING: Unexpected line-search exit status; re-evaluating...\n";
        f = problem.value(x);
        problem.gradient(x, g);
      }
      ///
      /// UPDATE RELATIVE ERROR AND ASSIGN TO FDELTA FOR CONVERGENCE CRIETERIA TESTS
      /// ( see https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.fmin_l_bfgs_b.html )
      // update the fDelta convergence status
      this->m_current.fDelta = std::abs(f-f_old)/(std::max(std::max(std::abs(f),std::abs(f_old)), 1.0));
      // prepare for next iteration
      TVector newY = g - g_old;
      TVector newS = x - x_old;
      // STEP 6: accept the correction pair only if curvature |s^T y| is
      // sufficiently positive relative to ||y||^2
      Scalar test = newS.dot(newY);
      test = (test < 0) ? -1.0 * test : test;
      if (test > 1e-7 * newY.squaredNorm()) {
        if (yHistory.cols() < m_historySize) {
          yHistory.conservativeResize(DIM, yHistory.cols() + 1);
          sHistory.conservativeResize(DIM, sHistory.cols() + 1);
        } else {
          // history full: drop the oldest column by shifting left
          yHistory.leftCols(m_historySize - 1) = yHistory.rightCols(m_historySize - 1).eval();
          sHistory.leftCols(m_historySize - 1) = sHistory.rightCols(m_historySize - 1).eval();
        }
        yHistory.rightCols(1) = newY;
        sHistory.rightCols(1) = newS;
        // STEP 7: rebuild theta, W, and M of the compact representation
        theta = (Scalar)(newY.transpose() * newY) / (newY.transpose() * newS);
        W = MatrixType::Zero(yHistory.rows(), yHistory.cols() + sHistory.cols());
        W << yHistory, (theta * sHistory);
        MatrixType A = sHistory.transpose() * yHistory;
        MatrixType L = A.template triangularView<Eigen::StrictlyLower>();
        MatrixType MM(A.rows() + L.rows(), A.rows() + L.cols());
        MatrixType D = -1 * A.diagonal().asDiagonal();
        MM << D, L.transpose(), L, ((sHistory.transpose() * sHistory) * theta);
        M = MM.inverse();
      }
      if (fabs(f_old - f) < 1e-8) {
        // successive function values too similar
        break;
      }
      ++this->m_current.iterations;
      this->m_current.gradNorm = g.norm();
      this->m_status = checkConvergence(this->m_stop, this->m_current);
    }
    x0 = x;
    if (this->m_debug > DebugLevel::None) {
      std::cout << "Stop status was: " << this->m_status << std::endl;
      std::cout << "Stop criteria were: " << std::endl << this->m_stop << std::endl;
      std::cout << "Current values are: " << std::endl << this->m_current << std::endl;
    }
  }
};
}
/* namespace cppoptlib */
#endif /* LBFGSBSOLVER_H_ */
|
GB_unaryop__identity_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint16
// op(A') function: GB_tran__identity_bool_uint16
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
bool z = (bool) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator with typecast: Cx [p] = (bool) Ax [p].
// Cx and Ax may alias.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB_unop__identity_bool_uint16
(
    bool *Cx, // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // dense arrays: one cast per entry, statically scheduled
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (bool) A': transpose with uint16 -> bool typecast.  The real work is
// done by the shared template GB_unaryop_transpose.c, expanded with the
// GB_* macros defined above.
GrB_Info GB_tran__identity_bool_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dgz2png.c | /*
Copyright (C) 2015 Lauri Kasanen
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <limits.h>
#include <lzo/lzo1x.h>
#include <png.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*
 * Read exactly `nmemb` items of `size` bytes from `stream` into `ptr`.
 * Any short read (EOF or I/O error) is fatal and aborts the process.
 * Returns nmemb on success.
 *
 * Fix: the function was declared to return size_t but had no return
 * statement, which is undefined behavior if the value is ever used.
 */
static size_t sread(void *ptr, size_t size, size_t nmemb, FILE *stream) {

	if (fread(ptr, size, nmemb, stream) != nmemb) {
		fprintf(stderr, "Read failure\n");
		abort();
	}

	return nmemb;
}
/*
 * Write `data` (w*h pixels, 4 bytes each: R,G,B plus one filler byte,
 * row-major) to the already-open stream `f` as an 8-bit RGB PNG.
 * Aborts on any libpng failure (allocation or write error).
 */
static void writepng(FILE * const f, const uint8_t *data,
			const uint32_t w, const uint32_t h) {

	png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
	if (!png_ptr) abort();

	png_infop info = png_create_info_struct(png_ptr);
	if (!info) abort();

	/* libpng reports errors by longjmp'ing back here */
	if (setjmp(png_jmpbuf(png_ptr))) abort();

	png_init_io(png_ptr, f);

	png_set_IHDR(png_ptr, info, w, h, 8, PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
			PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);

	png_write_info(png_ptr, info);

	/* input rows carry a 4th filler byte after RGB; tell libpng to skip it */
	png_set_filler(png_ptr, 0, PNG_FILLER_AFTER);

	const uint32_t rowlen = w * 4; /* input stride: 4 bytes per pixel */

	uint32_t i;
	for (i = 0; i < h; i++)
		png_write_row(png_ptr, (uint8_t *) data + i * rowlen);

	png_write_end(png_ptr, NULL);

	png_destroy_info_struct(png_ptr, &info);
	png_destroy_write_struct(&png_ptr, NULL);
}
/*
 * Convert one .dgz file to a PNG next to it ("foo.dgz" -> "foo.png").
 *
 * dgz layout: 1-byte mode (0 = 16-bit 5-6-5, 1 = 32-bit RGBX, 2 = 32-bit
 * BGRX), two little-endian u32 dimensions, then an LZO1X-compressed pixel
 * payload.  On any error a message is printed and the file is skipped.
 *
 * Fixes over the original:
 *  - "rb"/"wb" modes (portability on platforms with text-mode streams)
 *  - the is32>2 reject path no longer leaks `f` nor creates an empty
 *    output file (the output is now opened only after decompression)
 *  - calloc results are checked
 *  - `compressed`/`uncompressed` are no longer leaked on the LZO error path
 */
static void convert(const char file[]) {

	char outname[PATH_MAX];
	strncpy(outname, file, PATH_MAX);
	outname[PATH_MAX - 1] = '\0';

	uint32_t len = strlen(outname);
	if (len > 4) {
		/* overwrite the 3-char extension in place */
		outname[len - 3] = 'p';
		outname[len - 2] = 'n';
		outname[len - 1] = 'g';
	} else {
		strcat(outname, ".png");
	}

	FILE *f = fopen(file, "rb");
	if (!f) {
		fprintf(stderr, "Failed to open %s\n", file);
		return;
	}

	uint8_t is32;
	uint32_t w, h;

	sread(&is32, 1, 1, f);
	sread(&w, 4, 1, f);
	sread(&h, 4, 1, f);

	if (is32 > 2) {
		fprintf(stderr, "This is not a dgz file\n");
		fclose(f);
		return;
	}

	/* uncompressed payload size: 2 bytes/pixel for 5-6-5, else 4 */
	len = w * h * (is32 ? 4 : 2);

	uint8_t *compressed = (uint8_t *) calloc(len, 1);
	uint8_t *uncompressed = (uint8_t *) calloc(len, 1);
	if (!compressed || !uncompressed) {
		fprintf(stderr, "Out of memory\n");
		free(compressed);
		free(uncompressed);
		fclose(f);
		return;
	}

	const uint32_t complen = fread(compressed, 1, len, f);
	fclose(f);

	lzo_uint destlen = len;
	int ret = lzo1x_decompress(compressed, complen, uncompressed, &destlen, NULL);
	free(compressed); /* no longer needed on either path */

	if (ret != LZO_E_OK) {
		fprintf(stderr, "LZO error %d\n", ret);
		free(uncompressed);
		return;
	}

	if (!is32) {
		/* Convert 16-bit 5-6-5 to 32-bit */
		uint8_t *newdata = (uint8_t *) calloc(w * h * 4, 1);
		if (!newdata) {
			fprintf(stderr, "Out of memory\n");
			free(uncompressed);
			return;
		}
		uint32_t x, y;
		for (y = 0; y < h; y++) {
			for (x = 0; x < w; x++) {
				const uint16_t * const ptr = (uint16_t *) (uncompressed +
								y * w * 2 + x * 2);
				uint16_t pixel;
				memcpy(&pixel, ptr, 2); /* source may be unaligned */
				uint32_t outpixel = 0;
				/* Convert 5-6-5, scaling each field up to 8 bits */
				outpixel |= (pixel & 0x1f) << 19;
				outpixel |= ((pixel >> 5) & 0x3f) << 10;
				outpixel |= ((pixel >> 11) & 0x1f) << 3;
				uint32_t * const newptr = (uint32_t *) (newdata +
								y * w * 4 + x * 4);
				*newptr = outpixel; // aligned, so no need for memcpy
			}
		}
		free(uncompressed);
		uncompressed = newdata;
	} else if (is32 == 2) { /* BGR: swap the red and blue channels in place */
		uint32_t x, y;
		for (y = 0; y < h; y++) {
			for (x = 0; x < w; x++) {
				uint32_t pixel;
				uint32_t * const ptr = (uint32_t *) (uncompressed +
								y * w * 4 + x * 4);
				memcpy(&pixel, ptr, 4);
				const uint8_t red = pixel & 0xff;
				const uint8_t blue = (pixel >> 16) & 0xff;
				pixel &= 0xff00;
				pixel |= blue;
				pixel |= red << 16;
				memcpy(ptr, &pixel, 4);
			}
		}
	}

	/* open the output only once we actually have pixels to write */
	FILE *out = fopen(outname, "wb");
	if (!out) {
		fprintf(stderr, "Failed to open %s\n", outname);
		free(uncompressed);
		return;
	}

	writepng(out, uncompressed, w, h);

	free(uncompressed);
	fclose(out);
}
/*
 * Entry point: initialise LZO once, then convert every argument in
 * parallel.  Prints usage when invoked without files or with a flag.
 */
int main(int argc, char **argv) {

	if (lzo_init() != LZO_E_OK) {
		fprintf(stderr, "LZO init failed\n");
		return 1;
	}

	const int nfiles = argc - 1;
	if (nfiles < 1 || argv[1][0] == '-') {
		printf("Usage: %s file1.dgz file2.dgz...\n", argv[0]);
		return 0;
	}

	/* each file is independent, so conversions can run concurrently */
	int i;
	#pragma omp parallel for
	for (i = 1; i <= nfiles; i++)
		convert(argv[i]);

	return 0;
}
|
disposable.c | #include "disposable.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <omp.h>
/*
 * One parallel relaxation step over all boxes.
 *
 * Phase 1 computes each box's weighted average adjacent temperature (waat)
 * from its four neighbour lists (overlap-weighted dsv of each neighbour;
 * a missing side contributes the box's own dsv weighted by that side's
 * length).  Phase 2 moves each dsv toward its waat by AFFECT_RATE and
 * updates the global MIN_DSV/MAX_DSV extrema.
 *
 * Fixes over the original:
 *  - removed the unused/shadowed `tid` and `nthreads` locals
 *  - removed the explicit barrier ("omp for" already ends with one)
 *  - per-thread extrema now start at +/-INFINITY instead of INT_MAX/INT_MIN,
 *    which were wrong sentinels for dsv values outside the int range
 *  - the two unnamed critical sections (which shared one lock anyway)
 *    are merged into one
 */
void compute_commit_dsv(Box* box_arr) {
	omp_set_dynamic(0); // Explicitly disable dynamic teams
	omp_set_num_threads(NUM_THREADS);

	#pragma omp parallel shared(box_arr)
	{
		/* Phase 1: weighted average of the four neighbour sides */
		#pragma omp for
		for (int i = 0; i < NUM_BOXES; i++) {
			box_arr[i].waat = 0;

			// Top neighbours (or own width * dsv when the side is open)
			if (box_arr[i].num_top != 0) {
				for (int j = 0; j < box_arr[i].num_top; j++) {
					int nb = box_arr[i].top_ids[j];
					box_arr[i].waat += box_arr[nb].dsv * box_arr[i].top_ov[j];
				}
			}
			else {
				box_arr[i].waat += box_arr[i].width * box_arr[i].dsv;
			}

			// Bottom neighbours
			if (box_arr[i].num_bottom != 0) {
				for (int j = 0; j < box_arr[i].num_bottom; j++) {
					int nb = box_arr[i].bottom_ids[j];
					box_arr[i].waat += box_arr[nb].dsv * box_arr[i].bottom_ov[j];
				}
			}
			else {
				box_arr[i].waat += box_arr[i].width * box_arr[i].dsv;
			}

			// Left neighbours (or own height * dsv when the side is open)
			if (box_arr[i].num_left != 0) {
				for (int j = 0; j < box_arr[i].num_left; j++) {
					int nb = box_arr[i].left_ids[j];
					box_arr[i].waat += box_arr[nb].dsv * box_arr[i].left_ov[j];
				}
			}
			else {
				box_arr[i].waat += box_arr[i].height * box_arr[i].dsv;
			}

			// Right neighbours
			if (box_arr[i].num_right != 0) {
				for (int j = 0; j < box_arr[i].num_right; j++) {
					int nb = box_arr[i].right_ids[j];
					box_arr[i].waat += box_arr[nb].dsv * box_arr[i].right_ov[j];
				}
			}
			else {
				box_arr[i].waat += box_arr[i].height * box_arr[i].dsv;
			}

			// Normalize by the perimeter to get the weighted average
			box_arr[i].waat = box_arr[i].waat / box_arr[i].perimeter;
		}
		/* implicit barrier here: all waat values are committed before
		 * phase 2 reads them */

		/* Phase 2: commit dsv updates and track per-thread extrema */
		double local_min = INFINITY;
		double local_max = -INFINITY;

		#pragma omp for
		for (int i = 0; i < NUM_BOXES; i++) {
			if (box_arr[i].waat > box_arr[i].dsv) {
				box_arr[i].dsv += AFFECT_RATE * (box_arr[i].waat - box_arr[i].dsv);
			}
			else {
				box_arr[i].dsv -= AFFECT_RATE * (box_arr[i].dsv - box_arr[i].waat);
			}
			if (box_arr[i].dsv < local_min) local_min = box_arr[i].dsv;
			if (box_arr[i].dsv > local_max) local_max = box_arr[i].dsv;
		}

		/* merge per-thread extrema into the globals under one lock */
		#pragma omp critical
		{
			if (local_min < MIN_DSV)
				MIN_DSV = local_min;
			if (local_max > MAX_DSV)
				MAX_DSV = local_max;
		}
	}
}
/*
 * Read the grid header line from stdin and set the globals NUM_BOXES,
 * NUM_ROWS and NUM_COLS.  A leading '-' (the "-1" terminator) or a
 * missing/empty input is fatal.
 *
 * Fix: removed fflush(stdin) — flushing an input stream is undefined
 * behavior in C.
 */
void readgridparam() {
	// Assuming each line in the datafile is Max 500 characters
	char line[MAXLEN] = "";

	if (fgets(line, sizeof(line), stdin)) {
		// If the first line of the file contains -1, exit
		if (line[0] == '-') {
			fprintf(stderr, "First line of the file contains -1. Exiting....");
			exit(EXIT_FAILURE);
		}
		else {
			// We only expect 3 numbers in the first line
			// <number of grid boxes> <num_grid_rows> <num_grid_cols>
			int arr[3];
			parseline(arr, line, 0);
			NUM_BOXES = arr[0];
			NUM_ROWS = arr[1];
			NUM_COLS = arr[2];
		}
	}
	else {
		fprintf(stderr, "File may not exist or is empty. Exiting....");
		exit(EXIT_FAILURE);
	}
}
void populate(Box* box_arr) {
char line1[MAXLEN] = "";
int box_count = 0;
// Read rest of file and populate the data structure
fflush(stdin);
while (fgets(line1, sizeof(line1), stdin)) {
if (line1[0] == '-') {
break;
}
else if (!strcmp(line1, "")) continue;
else if (!(line1[0] >= '0' && line1[0] <= '9')) continue;
else {
// Create new Box element
// Get Box id;
int id[1];
parseline(id, line1, 0);
box_arr[box_count].id = id[0];
// Get location, height and width
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int box_loc[4];
parseline(box_loc, line1, 0);
box_arr[box_count].up_left_y = box_loc[0];
box_arr[box_count].up_left_x = box_loc[1];
box_arr[box_count].height = box_loc[2];
box_arr[box_count].width = box_loc[3];
box_arr[box_count].perimeter = 2 * (box_arr[box_count].height + box_arr[box_count].width);
// Get top neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int top_num;
top_num = parsefirst(line1);
box_arr[box_count].num_top = top_num;
int* toparr = (int*)malloc(top_num * sizeof(int));
int* toparrov = (int*)malloc(top_num * sizeof(int));
parseline(toparr, line1, 1);
box_arr[box_count].top_ids = toparr;
box_arr[box_count].top_ov = toparrov;
if (top_num == 0) {
box_arr[box_count].top_ids = NULL;
}
// Get bottom neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int bottom_num;
bottom_num = parsefirst(line1);
box_arr[box_count].num_bottom = bottom_num;
int* bottomarr = (int*)malloc(bottom_num * sizeof(int));
int* bottomarrov = (int*)malloc(bottom_num * sizeof(int));
parseline(bottomarr, line1, 1);
box_arr[box_count].bottom_ids = bottomarr;
box_arr[box_count].bottom_ov = bottomarrov;
if (bottom_num == 0) {
box_arr[box_count].bottom_ids = NULL;
}
// Get left neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int left_num;
left_num = parsefirst(line1);
box_arr[box_count].num_left = left_num;
int* leftarr = (int*)malloc(left_num * sizeof(int));
int* leftarrov = (int*)malloc(left_num * sizeof(int));
parseline(leftarr, line1, 1);
box_arr[box_count].left_ids = leftarr;
box_arr[box_count].left_ov = leftarrov;
if (left_num == 0) {
box_arr[box_count].left_ids = NULL;
}
// Get right neighbours
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
int right_num;
right_num = parsefirst(line1);
box_arr[box_count].num_right = right_num;
int* rightarr = (int*)malloc(right_num * sizeof(int));
int* rightarrov = (int*)malloc(right_num * sizeof(int));
parseline(rightarr, line1, 1);
box_arr[box_count].right_ids = rightarr;
box_arr[box_count].right_ov = rightarrov;
if (right_num == 0) {
box_arr[box_count].right_ids = NULL;
}
// Get dsv value
fflush(stdin);
fgets(line1, sizeof(line1), stdin);
double dsv_val;
dsv_val = parsedsv(line1);
box_arr[box_count].dsv = dsv_val;
// Move to next box
box_count++;
fflush(stdin);
}
}
}
/*
 * Extract every run of decimal digits in `path` as an unsigned integer and
 * store them in order into `num` (caller guarantees capacity).
 *
 * func == 0: parse the whole line.
 * func == 1: skip the leading number (the count token) and parse the rest.
 *
 * Fixes: removed the unused `char c` local and hoisted strlen() out of the
 * loop conditions (it was re-evaluated on every character, making the
 * routine quadratic in the line length).
 */
void parseline(int* num, char* path, int func) {
	const size_t len = strlen(path);
	size_t i = 0;
	int num_count = 0;

	if (func == 1) {
		/* skip the leading digit run (the count token) */
		while (i < len && path[i] >= '0' && path[i] <= '9')
			i++;
	}

	for (; i < len; i++)
	{
		if (path[i] >= '0' && path[i] <= '9') //to confirm it's a digit
		{
			int number = 0;
			do {
				number = number * 10 + (path[i] - '0');
				i++;
			} while (i < len && path[i] >= '0' && path[i] <= '9');
			num[num_count] = number;
			num_count++;
		}
	}
}
/*
 * Parse the leading decimal integer of `path` (the count token at the
 * start of a neighbour-list line) and return it.
 *
 * Note: like the original, this assumes the line starts with a digit —
 * the first character is consumed unconditionally.
 *
 * Fix: hoisted strlen() out of the loop condition (it was re-evaluated on
 * every digit, making the routine quadratic in the line length).
 */
int parsefirst(char* path) {
	const size_t len = strlen(path);
	size_t i = 0;
	int number = 0;

	do {
		number = number * 10 + (path[i] - '0');
		i++;
	} while (i < len && path[i] >= '0' && path[i] <= '9');

	return number;
}
/*
 * Parse the dsv (temperature) value at the start of `path`.
 * Thin wrapper around the C library's strtod.
 */
double parsedsv(char* path) {
	return strtod(path, NULL);
}
void calcoverlap(struct Box* box_arr) {
int i;
for (i = 0; i < NUM_BOXES; i++) {
// Calculate TOP overlap for each node.
// If 0, skip.
if (box_arr[i].num_top != 0) {
int j;
for (j = 0; j < box_arr[i].num_top; j++) {
// find right most of x_left and xtop_left
int cur_topid = box_arr[i].top_ids[j];
int len2, len1;
if (box_arr[i].up_left_x >= box_arr[cur_topid].up_left_x) len1 = box_arr[i].up_left_x;
else len1 = box_arr[cur_topid].up_left_x;
if ((box_arr[i].up_left_x + box_arr[i].width) <= (box_arr[cur_topid].up_left_x + box_arr[cur_topid].width)) len2 = (box_arr[i].up_left_x + box_arr[i].width);
else len2 = (box_arr[cur_topid].up_left_x + box_arr[cur_topid].width);
box_arr[i].top_ov[j] = abs(len2 - len1);
}
}
// Calculate BOTTOM overlap for each node.
// If 0, skip.
if (box_arr[i].num_bottom != 0) {
int j;
for (j = 0; j < box_arr[i].num_bottom; j++) {
// find right most of x_left and xbottom_left
int cur_bottomid = box_arr[i].bottom_ids[j];
int len2, len1;
if (box_arr[i].up_left_x >= box_arr[cur_bottomid].up_left_x) len1 = box_arr[i].up_left_x;
else len1 = box_arr[cur_bottomid].up_left_x;
// find left most of x_left + width and xbottom_left + its width
if ((box_arr[i].up_left_x + box_arr[i].width) <= (box_arr[cur_bottomid].up_left_x + box_arr[cur_bottomid].width)) len2 = (box_arr[i].up_left_x + box_arr[i].width);
else len2 = (box_arr[cur_bottomid].up_left_x + box_arr[cur_bottomid].width);
box_arr[i].bottom_ov[j] = abs(len2 - len1);
}
}
// Calculate left overlap for each node.
// If 0, skip.
if (box_arr[i].num_left != 0) {
int j;
for (j = 0; j < box_arr[i].num_left; j++) {
// find bottom most of y_left and yleft_left
int cur_leftid = box_arr[i].left_ids[j];
int len2, len1;
if (box_arr[i].up_left_y >= box_arr[cur_leftid].up_left_y) len1 = box_arr[i].up_left_y;
else len1 = box_arr[cur_leftid].up_left_y;
// find top most of y_left + height and yleft_left + its height
if ((box_arr[i].up_left_y + box_arr[i].height) <= (box_arr[cur_leftid].up_left_y + box_arr[cur_leftid].height)) len2 = (box_arr[i].up_left_y + box_arr[i].height);
else len2 = (box_arr[cur_leftid].up_left_y + box_arr[cur_leftid].height);
box_arr[i].left_ov[j] = abs(len2 - len1);
}
}
// Calculate right overlap for each node.
// If 0, skip.
if (box_arr[i].num_right != 0) {
int j;
for (j = 0; j < box_arr[i].num_right; j++) {
// find bottom most of y_left and yright_left
int cur_rightid = box_arr[i].right_ids[j];
int len2, len1;
if (box_arr[i].up_left_y >= box_arr[cur_rightid].up_left_y) len1 = box_arr[i].up_left_y;
else len1 = box_arr[cur_rightid].up_left_y;
// find top most of y_left + height and yright_left + its height
if ((box_arr[i].up_left_y + box_arr[i].height) <= (box_arr[cur_rightid].up_left_y + box_arr[cur_rightid].height)) len2 = (box_arr[i].up_left_y + box_arr[i].height);
else len2 = (box_arr[cur_rightid].up_left_y + box_arr[cur_rightid].height);
box_arr[i].right_ov[j] = abs(len2 - len1);
}
}
}
}
void printboxes(struct Box* box_arr) {
int i;
for (i = 0; i < NUM_BOXES; i++) {
printf("================================");
printf("\n\nBox id: %d\n", box_arr[i].id);
printf("Box left_X, left_y, height, width, perimiter: %d, %d, %d, %d, %d\n", box_arr[i].up_left_x, box_arr[i].up_left_y, box_arr[i].height, box_arr[i].width, box_arr[i].perimeter);
printf("Box top neighbours and overlap: ");
int j;
for (j = 0; j < box_arr[i].num_top; j++) {
printf("%d:%d, ", box_arr[i].top_ids[j], box_arr[i].top_ov[j]);
}
printf("\n");
printf("Box bottom neighbours and overlap: ");
for (j = 0; j < box_arr[i].num_bottom; j++) {
printf("%d:%d, ", box_arr[i].bottom_ids[j], box_arr[i].bottom_ov[j]);
}
printf("\n");
printf("Box left neighbours: ");
for (j = 0; j < box_arr[i].num_left; j++) {
printf("%d:%d, ", box_arr[i].left_ids[j], box_arr[i].left_ov[j]);
}
printf("\n");
printf("Box right neighbours: ");
for (j = 0; j < box_arr[i].num_right; j++) {
printf("%d:%d, ", box_arr[i].right_ids[j], box_arr[i].right_ov[j]);
}
printf("\n");
printf("Box dsv value: %lf", box_arr[i].dsv);
printf("\n");
}
}
|
convolutiondepthwise_3x3_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Quantize a float to int8: round to nearest, then saturate to the
// symmetric range [-127, 127] (note: -128 is deliberately excluded).
static inline signed char float2int8(float v)
{
    int q = round(v);
    q = (q > 127) ? 127 : q;
    q = (q < -127) ? -127 : q;
    return (signed char)q;
}
// convdw3x3s1_int8_neon: depthwise 3x3 convolution, stride 1.
// Input/weights are int8; outputs are raw int32 accumulators (requantization
// happens elsewhere).  Each of the outch channels is independent and is
// processed by one OpenMP task.  The main loop produces two output rows per
// iteration from four input rows; the NEON paths compute 8 output columns
// per inner iteration, with a scalar tail for the remaining columns.
static void convdw3x3s1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);

        // 9 int8 weights for this channel
        const signed char* kernel = (const signed char*)_kernel + p * 9;

        int* outptr0 = out;
        int* outptr0n = outptr0 + outw; // second output row of the pair

        const signed char* img0 = bottom_blob.channel(p);

        // four consecutive input rows feed two output rows
        const signed char* r0 = img0;
        const signed char* r1 = img0 + w;
        const signed char* r2 = img0 + w * 2;
        const signed char* r3 = img0 + w * 3;

        int i = 0;

#if __ARM_NEON
        // widen the 9 int8 weights to int16 lanes:
        // _k0123 = k0..k3, _k4567 = k4..k7, _k8xxx = k8 (lane 0)
        int8x16_t _k0123456789x = vld1q_s8(kernel);
        int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
        int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));

        int16x4_t _k0123 = vget_low_s16(_k_s16);
        int16x4_t _k4567 = vget_high_s16(_k_s16);
        int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON

        // main loop: two output rows per iteration
        for (; i + 1 < outh; i += 2)
        {
#if __ARM_NEON
            int nn = outw >> 3;    // NEON iterations (8 outputs each)
            int remain = outw & 7; // scalar tail columns
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    "0:                                \n"
                    "ld1    {v4.8b, v5.8b}, [%3]       \n"
                    "ld1    {v6.8b, v7.8b}, [%4]       \n"
                    "ld1    {v8.8b, v9.8b}, [%5]       \n"
                    "ld1    {v10.8b, v11.8b}, [%6]     \n"
                    "add    %3, %3, #8                 \n"
                    "add    %4, %4, #8                 \n"
                    "add    %5, %5, #8                 \n"
                    "add    %6, %6, #8                 \n"

                    "ext    v12.8b, v4.8b, v5.8b, #1   \n"
                    "ext    v13.8b, v4.8b, v5.8b, #2   \n"
                    "ext    v14.8b, v6.8b, v7.8b, #1   \n"
                    "ext    v15.8b, v6.8b, v7.8b, #2   \n"
                    "ext    v16.8b, v8.8b, v9.8b, #1   \n"
                    "ext    v17.8b, v8.8b, v9.8b, #2   \n"
                    "ext    v18.8b, v10.8b, v11.8b, #1 \n"
                    "ext    v19.8b, v10.8b, v11.8b, #2 \n"

                    "sshll  v4.8h, v4.8b, #0           \n" // r00
                    "sshll  v12.8h, v12.8b, #0         \n" // r01
                    "sshll  v13.8h, v13.8b, #0         \n" // r02
                    "sshll  v6.8h, v6.8b, #0           \n" // r10
                    "sshll  v14.8h, v14.8b, #0         \n" // r11
                    "sshll  v15.8h, v15.8b, #0         \n" // r12
                    "sshll  v8.8h, v8.8b, #0           \n" // r20
                    "sshll  v16.8h, v16.8b, #0         \n" // r21
                    "sshll  v17.8h, v17.8b, #0         \n" // r22
                    "sshll  v10.8h, v10.8b, #0         \n" // r30
                    "sshll  v18.8h, v18.8b, #0         \n" // r31
                    "sshll  v19.8h, v19.8b, #0         \n" // r32

                    // r0
                    "smull  v20.4s, v4.4h, %14.h[0]    \n" // (r00 - r07) * k00
                    "smull2 v21.4s, v4.8h, %14.h[0]    \n"
                    "smull  v22.4s, v12.4h, %14.h[1]   \n" // (r01 - r08) * k01
                    "smull2 v23.4s, v12.8h, %14.h[1]   \n"
                    "smull  v24.4s, v13.4h, %14.h[2]   \n" // (r02 - r09) * k02
                    "smull2 v25.4s, v13.8h, %14.h[2]   \n"

                    // r1
                    "smull  v26.4s, v6.4h, %14.h[0]    \n" // (r10 - r17) * k00
                    "smull2 v27.4s, v6.8h, %14.h[0]    \n"
                    "smull  v28.4s, v14.4h, %14.h[1]   \n" // (r11 - r18) * k01
                    "smull2 v29.4s, v14.8h, %14.h[1]   \n"
                    "smull  v30.4s, v15.4h, %14.h[2]   \n" // (r12 - r19) * k02
                    "smull2 v31.4s, v15.8h, %14.h[2]   \n"

                    "smlal  v20.4s, v6.4h, %14.h[3]    \n" // (r10 - r17) * k03
                    "smlal2 v21.4s, v6.8h, %14.h[3]    \n"
                    "smlal  v22.4s, v14.4h, %15.h[0]   \n" // (r11 - r18) * k04
                    "smlal2 v23.4s, v14.8h, %15.h[0]   \n"
                    "smlal  v24.4s, v15.4h, %15.h[1]   \n" // (r12 - r19) * k05
                    "smlal2 v25.4s, v15.8h, %15.h[1]   \n"

                    // r2
                    "smlal  v26.4s, v8.4h, %14.h[3]    \n" // (r20 - r27) * k03
                    "smlal2 v27.4s, v8.8h, %14.h[3]    \n"
                    "smlal  v28.4s, v16.4h, %15.h[0]   \n" // (r21 - r28) * k04
                    "smlal2 v29.4s, v16.8h, %15.h[0]   \n"
                    "smlal  v30.4s, v17.4h, %15.h[1]   \n" // (r22 - r29) * k05
                    "smlal2 v31.4s, v17.8h, %15.h[1]   \n"

                    "smlal  v20.4s, v8.4h, %15.h[2]    \n" // (r20 - r27) * k06
                    "smlal2 v21.4s, v8.8h, %15.h[2]    \n"
                    "smlal  v22.4s, v16.4h, %15.h[3]   \n" // (r21 - r28) * k07
                    "smlal2 v23.4s, v16.8h, %15.h[3]   \n"
                    "smlal  v24.4s, v17.4h, %16.h[0]   \n" // (r22 - r29) * k08
                    "smlal2 v25.4s, v17.8h, %16.h[0]   \n"

                    // r3
                    "smlal  v26.4s, v10.4h, %15.h[2]   \n" // (r30 - r37) * k06
                    "smlal2 v27.4s, v10.8h, %15.h[2]   \n"
                    "smlal  v28.4s, v18.4h, %15.h[3]   \n" // (r31 - r38) * k07
                    "smlal2 v29.4s, v18.8h, %15.h[3]   \n"
                    "smlal  v30.4s, v19.4h, %16.h[0]   \n" // (r32 - r39) * k08
                    "smlal2 v31.4s, v19.8h, %16.h[0]   \n"

                    // add and save
                    "add    v20.4s, v20.4s, v22.4s     \n"
                    "add    v21.4s, v21.4s, v23.4s     \n"
                    "add    v26.4s, v26.4s, v28.4s     \n"
                    "add    v27.4s, v27.4s, v29.4s     \n"
                    "add    v20.4s, v20.4s, v24.4s     \n"
                    "add    v21.4s, v21.4s, v25.4s     \n"
                    "add    v26.4s, v26.4s, v30.4s     \n"
                    "add    v27.4s, v27.4s, v31.4s     \n"

                    "st1    {v20.4s, v21.4s}, [%1], #32 \n"
                    "st1    {v26.4s, v27.4s}, [%2], #32 \n"

                    "subs   %w0, %w0, #1               \n"
                    "bne    0b                         \n"
                    : "=r"(nn),       // %0
                    "=r"(outptr0),    // %1
                    "=r"(outptr0n),   // %2
                    "=r"(r0),         // %3
                    "=r"(r1),         // %4
                    "=r"(r2),         // %5
                    "=r"(r3)          // %6
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k0123),      // %14
                    "w"(_k4567),      // %15
                    "w"(_k8xxx)       // %16
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "0:                             \n"
                    // r0
                    "vld1.s8    {d30-d31}, [%3]     \n" // r0
                    "add        %3, %3, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r00
                    "vmovl.s8   q5, d10             \n" // r01
                    "vmovl.s8   q6, d12             \n" // r02

                    // sum0
                    "vmull.s16  q7, d30, %P14[0]    \n" // (r00 - r07) * k00
                    "vmull.s16  q8, d31, %P14[0]    \n"
                    "vmull.s16  q9, d10, %P14[1]    \n" // (r01 - r08) * k01
                    "vmull.s16  q10, d11, %P14[1]   \n"
                    "vmlal.s16  q7, d12, %P14[2]    \n" // (r02 - r09) * k02
                    "vmlal.s16  q8, d13, %P14[2]    \n"

                    // r1
                    "vld1.s8    {d30-d31}, [%4]     \n" // r1
                    "add        %4, %4, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r10
                    "vmovl.s8   q5, d10             \n" // r11
                    "vmovl.s8   q6, d12             \n" // r12

                    // sum0
                    "vmlal.s16  q7, d30, %P14[3]    \n" // (r10 - r17) * k03
                    "vmlal.s16  q8, d31, %P14[3]    \n"
                    "vmlal.s16  q9, d10, %P15[0]    \n" // (r11 - r18) * k04
                    "vmlal.s16  q10, d11, %P15[0]   \n"
                    "vmlal.s16  q7, d12, %P15[1]    \n" // (r12 - r19) * k05
                    "vmlal.s16  q8, d13, %P15[1]    \n"

                    // sum1
                    "vmull.s16  q11, d30, %P14[0]   \n" // (r10 - r17) * k00
                    "vmull.s16  q12, d31, %P14[0]   \n"
                    "vmull.s16  q13, d10, %P14[1]   \n" // (r11 - r18) * k01
                    "vmull.s16  q14, d11, %P14[1]   \n"
                    "vmlal.s16  q11, d12, %P14[2]   \n" // (r12 - r19) * k02
                    "vmlal.s16  q12, d13, %P14[2]   \n"

                    // r2
                    "vld1.s8    {d30-d31}, [%5]     \n" // r2
                    "add        %5, %5, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r20
                    "vmovl.s8   q5, d10             \n" // r21
                    "vmovl.s8   q6, d12             \n" // r22

                    // sum0
                    "vmlal.s16  q7, d30, %P15[2]    \n" // (r20 - r27) * k06
                    "vmlal.s16  q8, d31, %P15[2]    \n"
                    "vmlal.s16  q9, d10, %P15[3]    \n" // (r21 - r28) * k07
                    "vmlal.s16  q10, d11, %P15[3]   \n"
                    "vmlal.s16  q7, d12, %P16[0]    \n" // (r22 - r29) * k08
                    "vmlal.s16  q8, d13, %P16[0]    \n"

                    // sum1
                    "vmlal.s16  q11, d30, %P14[3]   \n" // (r20 - r27) * k03
                    "vmlal.s16  q12, d31, %P14[3]   \n"
                    "vmlal.s16  q13, d10, %P15[0]   \n" // (r21 - r28) * k04
                    "vmlal.s16  q14, d11, %P15[0]   \n"
                    "vmlal.s16  q11, d12, %P15[1]   \n" // (r22 - r29) * k05
                    "vmlal.s16  q12, d13, %P15[1]   \n"

                    // r3
                    "vld1.s8    {d30-d31}, [%6]     \n" // r3
                    "add        %6, %6, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r30
                    "vmovl.s8   q5, d10             \n" // r31
                    "vmovl.s8   q6, d12             \n" // r32

                    // sum1
                    "vmlal.s16  q11, d30, %P15[2]   \n" // (r30 - r37) * k06
                    "vmlal.s16  q12, d31, %P15[2]   \n"
                    "vmlal.s16  q13, d10, %P15[3]   \n" // (r31 - r38) * k07
                    "vmlal.s16  q14, d11, %P15[3]   \n"
                    "vmlal.s16  q11, d12, %P16[0]   \n" // (r32 - r39) * k08
                    "vmlal.s16  q12, d13, %P16[0]   \n"

                    "subs       %0, %0, #1          \n"

                    // add and save
                    "vadd.s32   q7, q7, q9          \n"
                    "vadd.s32   q8, q8, q10         \n"
                    "vadd.s32   q11, q11, q13       \n"
                    "vadd.s32   q12, q12, q14       \n"

                    "vst1.s32   {d14-d17}, [%1]!    \n"
                    "vst1.s32   {d22-d25}, [%2]!    \n"

                    "bne        0b                  \n"
                    : "=r"(nn),       // %0
                    "=r"(outptr0),    // %1
                    "=r"(outptr0n),   // %2
                    "=r"(r0),         // %3
                    "=r"(r1),         // %4
                    "=r"(r2),         // %5
                    "=r"(r3)          // %6
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k0123),      // %14
                    "w"(_k4567),      // %15
                    "w"(_k8xxx)       // %16
                    : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: one output column at a time, both output rows
            for (; remain > 0; remain--)
            {
                // TODO NEON
                int sum0 = 0;
                int sum0n = 0;

                sum0 += (int)r0[0] * kernel[0];
                sum0 += (int)r0[1] * kernel[1];
                sum0 += (int)r0[2] * kernel[2];
                sum0 += (int)r1[0] * kernel[3];
                sum0 += (int)r1[1] * kernel[4];
                sum0 += (int)r1[2] * kernel[5];
                sum0 += (int)r2[0] * kernel[6];
                sum0 += (int)r2[1] * kernel[7];
                sum0 += (int)r2[2] * kernel[8];

                sum0n += (int)r1[0] * kernel[0];
                sum0n += (int)r1[1] * kernel[1];
                sum0n += (int)r1[2] * kernel[2];
                sum0n += (int)r2[0] * kernel[3];
                sum0n += (int)r2[1] * kernel[4];
                sum0n += (int)r2[2] * kernel[5];
                sum0n += (int)r3[0] * kernel[6];
                sum0n += (int)r3[1] * kernel[7];
                sum0n += (int)r3[2] * kernel[8];

                *outptr0 = sum0;
                *outptr0n = sum0n;

                r0++;
                r1++;
                r2++;
                r3++;
                outptr0++;
                outptr0n++;
            }

            // advance past the 2-column right border and skip one full row
            // (two output rows were produced from rows i .. i+3)
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;

            // output pointers already advanced by one row each; skip the
            // paired row so the next iteration starts two rows down
            outptr0 += outw;
            outptr0n += outw;
        }

        // leftover single output row (odd outh)
        for (; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    "0:                                \n"
                    "ld1    {v4.8b, v5.8b}, [%2]       \n"
                    "ld1    {v6.8b, v7.8b}, [%3]       \n"
                    "ld1    {v8.8b, v9.8b}, [%4]       \n"
                    "add    %2, %2, #8                 \n"
                    "add    %3, %3, #8                 \n"
                    "add    %4, %4, #8                 \n"

                    "ext    v12.8b, v4.8b, v5.8b, #1   \n"
                    "ext    v13.8b, v4.8b, v5.8b, #2   \n"
                    "ext    v14.8b, v6.8b, v7.8b, #1   \n"
                    "ext    v15.8b, v6.8b, v7.8b, #2   \n"
                    "ext    v16.8b, v8.8b, v9.8b, #1   \n"
                    "ext    v17.8b, v8.8b, v9.8b, #2   \n"

                    "sshll  v4.8h, v4.8b, #0           \n" // r00
                    "sshll  v12.8h, v12.8b, #0         \n" // r01
                    "sshll  v13.8h, v13.8b, #0         \n" // r02
                    "sshll  v6.8h, v6.8b, #0           \n" // r10
                    "sshll  v14.8h, v14.8b, #0         \n" // r11
                    "sshll  v15.8h, v15.8b, #0         \n" // r12
                    "sshll  v8.8h, v8.8b, #0           \n" // r20
                    "sshll  v16.8h, v16.8b, #0         \n" // r21
                    "sshll  v17.8h, v17.8b, #0         \n" // r22

                    // r0
                    "smull  v20.4s, v4.4h, %10.h[0]    \n" // (r00 - r07) * k00
                    "smull2 v21.4s, v4.8h, %10.h[0]    \n"
                    "smull  v22.4s, v12.4h, %10.h[1]   \n" // (r01 - r08) * k01
                    "smull2 v23.4s, v12.8h, %10.h[1]   \n"
                    "smull  v24.4s, v13.4h, %10.h[2]   \n" // (r02 - r09) * k02
                    "smull2 v25.4s, v13.8h, %10.h[2]   \n"

                    // r1
                    "smlal  v20.4s, v6.4h, %10.h[3]    \n" // (r10 - r17) * k03
                    "smlal2 v21.4s, v6.8h, %10.h[3]    \n"
                    "smlal  v22.4s, v14.4h, %11.h[0]   \n" // (r11 - r18) * k04
                    "smlal2 v23.4s, v14.8h, %11.h[0]   \n"
                    "smlal  v24.4s, v15.4h, %11.h[1]   \n" // (r12 - r19) * k05
                    "smlal2 v25.4s, v15.8h, %11.h[1]   \n"

                    // r2
                    "smlal  v20.4s, v8.4h, %11.h[2]    \n" // (r20 - r27) * k06
                    "smlal2 v21.4s, v8.8h, %11.h[2]    \n"
                    "smlal  v22.4s, v16.4h, %11.h[3]   \n" // (r21 - r28) * k07
                    "smlal2 v23.4s, v16.8h, %11.h[3]   \n"
                    "smlal  v24.4s, v17.4h, %12.h[0]   \n" // (r22 - r29) * k08
                    "smlal2 v25.4s, v17.8h, %12.h[0]   \n"

                    // add and save
                    "add    v20.4s, v20.4s, v22.4s     \n"
                    "add    v21.4s, v21.4s, v23.4s     \n"
                    "add    v20.4s, v20.4s, v24.4s     \n"
                    "add    v21.4s, v21.4s, v25.4s     \n"

                    "st1    {v20.4s, v21.4s}, [%1], #32 \n"

                    "subs   %w0, %w0, #1               \n"
                    "bne    0b                         \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),  // %1
                    "=r"(r0),       // %2
                    "=r"(r1),       // %3
                    "=r"(r2)        // %4
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),    // %10
                    "w"(_k4567),    // %11
                    "w"(_k8xxx)     // %12
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "0:                             \n"
                    // r0
                    "vld1.s8    {d30-d31}, [%2]     \n" // r0
                    "add        %2, %2, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r00
                    "vmovl.s8   q5, d10             \n" // r01
                    "vmovl.s8   q6, d12             \n" // r02

                    // sum0
                    "vmull.s16  q7, d30, %P10[0]    \n" // (r00 - r07) * k00
                    "vmull.s16  q8, d31, %P10[0]    \n"
                    "vmull.s16  q9, d10, %P10[1]    \n" // (r01 - r08) * k01
                    "vmull.s16  q10, d11, %P10[1]   \n"
                    "vmlal.s16  q7, d12, %P10[2]    \n" // (r02 - r09) * k02
                    "vmlal.s16  q8, d13, %P10[2]    \n"

                    // r1
                    "vld1.s8    {d30-d31}, [%3]     \n" // r1
                    "add        %3, %3, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r10
                    "vmovl.s8   q5, d10             \n" // r11
                    "vmovl.s8   q6, d12             \n" // r12

                    // sum0
                    "vmlal.s16  q7, d30, %P10[3]    \n" // (r10 - r17) * k03
                    "vmlal.s16  q8, d31, %P10[3]    \n"
                    "vmlal.s16  q9, d10, %P11[0]    \n" // (r11 - r18) * k04
                    "vmlal.s16  q10, d11, %P11[0]   \n"
                    "vmlal.s16  q7, d12, %P11[1]    \n" // (r12 - r19) * k05
                    "vmlal.s16  q8, d13, %P11[1]    \n"

                    // r2
                    "vld1.s8    {d30-d31}, [%4]     \n" // r2
                    "add        %4, %4, #8          \n"
                    "vext.s8    d10, d30, d31, #1   \n"
                    "vext.s8    d12, d30, d31, #2   \n"

                    "vmovl.s8   q15, d30            \n" // r20
                    "vmovl.s8   q5, d10             \n" // r21
                    "vmovl.s8   q6, d12             \n" // r22

                    // sum0
                    "vmlal.s16  q7, d30, %P11[2]    \n" // (r20 - r27) * k06
                    "vmlal.s16  q8, d31, %P11[2]    \n"
                    "vmlal.s16  q9, d10, %P11[3]    \n" // (r21 - r28) * k07
                    "vmlal.s16  q10, d11, %P11[3]   \n"
                    "vmlal.s16  q7, d12, %P12[0]    \n" // (r22 - r29) * k08
                    "vmlal.s16  q8, d13, %P12[0]    \n"

                    "subs       %0, %0, #1          \n"

                    // add and save
                    "vadd.s32   q7, q7, q9          \n"
                    "vadd.s32   q8, q8, q10         \n"

                    "vst1.s32   {d14-d17}, [%1]!    \n"
                    "bne        0b                  \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),  // %1
                    "=r"(r0),       // %2
                    "=r"(r1),       // %3
                    "=r"(r2)        // %4
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),    // %10
                    "w"(_k4567),    // %11
                    "w"(_k8xxx)     // %12
                    : "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail for the single-row case
            for (; remain > 0; remain--)
            {
                int sum = 0;

                sum += (int)r0[0] * kernel[0];
                sum += (int)r0[1] * kernel[1];
                sum += (int)r0[2] * kernel[2];
                sum += (int)r1[0] * kernel[3];
                sum += (int)r1[1] * kernel[4];
                sum += (int)r1[2] * kernel[5];
                sum += (int)r2[0] * kernel[6];
                sum += (int)r2[1] * kernel[7];
                sum += (int)r2[2] * kernel[8];

                *outptr0 = sum;

                r0++;
                r1++;
                r2++;
                outptr0++;
            }

            // skip the 2-column right border to reach the next input row
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
static void convdw3x3s2_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p * 9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w * 2;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"0: \n"
"ld2 {v4.8b, v5.8b}, [%2], #16 \n"
"ld2 {v6.8b, v7.8b}, [%2] \n"
"ld2 {v8.8b, v9.8b}, [%3], #16 \n"
"ld2 {v10.8b, v11.8b}, [%3] \n"
"ld2 {v12.8b, v13.8b}, [%4], #16 \n"
"ld2 {v14.8b, v15.8b}, [%4] \n"
"ext v6.8b, v4.8b, v6.8b, #1 \n"
"ext v10.8b, v8.8b, v10.8b, #1 \n"
"ext v14.8b, v12.8b, v14.8b, #1 \n"
"sshll v4.8h, v4.8b, #0 \n" // r00
"sshll v5.8h, v5.8b, #0 \n" // r01
"sshll v6.8h, v6.8b, #0 \n" // r02
"sshll v8.8h, v8.8b, #0 \n" // r10
"sshll v9.8h, v9.8b, #0 \n" // r11
"sshll v10.8h, v10.8b, #0 \n" // r12
"sshll v12.8h, v12.8b, #0 \n" // r20
"sshll v13.8h, v13.8b, #0 \n" // r21
"sshll v14.8h, v14.8b, #0 \n" // r22
// r0
"smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
"smull2 v21.4s, v4.8h, %10.h[0] \n"
"smull v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01
"smull2 v23.4s, v5.8h, %10.h[1] \n"
"smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02
"smull2 v25.4s, v6.8h, %10.h[2] \n"
// r1
"smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03
"smlal2 v21.4s, v8.8h, %10.h[3] \n"
"smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04
"smlal2 v23.4s, v9.8h, %11.h[0] \n"
"smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05
"smlal2 v25.4s, v10.8h, %11.h[1] \n"
// r2
"smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06
"smlal2 v21.4s, v12.8h, %11.h[2] \n"
"smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07
"smlal2 v23.4s, v13.8h, %11.h[3] \n"
"smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08
"smlal2 v25.4s, v14.8h, %12.h[0] \n"
// add and save
"add v20.4s, v20.4s, v22.4s \n"
"add v21.4s, v21.4s, v23.4s \n"
"add v20.4s, v20.4s, v24.4s \n"
"add v21.4s, v21.4s, v25.4s \n"
"st1 {v20.4s, v21.4s}, [%1], #32 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25");
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
// r0
"vld2.s8 {d30-d31}, [%2]! \n" // r0
"vld2.s8 {d10-d11}, [%2] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r01
"vmovl.s8 q15, d30 \n" // r00
"vmovl.s8 q6, d12 \n" // r02
// sum0
"vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
"vmull.s16 q8, d31, %P10[0] \n"
"vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
"vmull.s16 q10, d11, %P10[1] \n"
"vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
"vmlal.s16 q8, d13, %P10[2] \n"
// r1
"vld2.s8 {d30-d31}, [%3]! \n" // r1
"vld2.s8 {d10-d11}, [%3] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r11
"vmovl.s8 q15, d30 \n" // r10
"vmovl.s8 q6, d12 \n" // r12
// sum0
"vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
"vmlal.s16 q8, d31, %P10[3] \n"
"vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
"vmlal.s16 q10, d11, %P11[0] \n"
"vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
"vmlal.s16 q8, d13, %P11[1] \n"
// r2
"vld2.s8 {d30-d31}, [%4]! \n" // r2
"vld2.s8 {d10-d11}, [%4] \n"
"vext.s8 d12, d30, d10, #1 \n"
"vmovl.s8 q5, d31 \n" // r21
"vmovl.s8 q15, d30 \n" // r20
"vmovl.s8 q6, d12 \n" // r22
// sum0
"vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
"vmlal.s16 q8, d31, %P11[2] \n"
"vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
"vmlal.s16 q10, d11, %P11[3] \n"
"vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
"vmlal.s16 q8, d13, %P12[0] \n"
"subs %0, %0, #1 \n"
// add and save
"vadd.s32 q7, q7, q9 \n"
"vadd.s32 q8, q8, q10 \n"
"vst1.s32 {d14-d17}, [%1]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k4567), // %11
"w"(_k8xxx) // %12
: "cc", "memory", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain > 0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
// Depthwise 3x3 stride-1 int8 convolution with fused requantization.
// Per output channel p: top_s8 = saturate(round((sum_s32 * scale_in + bias) * scale_out)),
// where scale_in = scales_requant[2p] and scale_out = scales_requant[2p+1].
// Two output rows are produced per outer iteration (using input rows r0..r3);
// a second loop handles a trailing odd row. NEON paths process 8 output
// columns per asm iteration; the scalar tail handles the remainder.
static void convdw3x3s1_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* bias = _bias;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2 * p];
        const float scale_requant_out = scales_requant[2 * p + 1];
        // 9 kernel taps for this channel, stored contiguously
        const signed char* kernel = (const signed char*)_kernel + p * 9;
        signed char* outptr0 = out;
        signed char* outptr0n = outptr0 + outw; // second output row of the pair
        const signed char* img0 = bottom_blob.channel(p);
        const signed char* r0 = img0;
        const signed char* r1 = img0 + w;
        const signed char* r2 = img0 + w * 2;
        const signed char* r3 = img0 + w * 3;
        int i = 0;
#if __ARM_NEON
        // NOTE(review): vld1q_s8 reads 16 bytes but only 9 taps exist for the
        // last channel — relies on the kernel Mat's allocation padding; only
        // lanes 0..8 are consumed below. Widen taps to s16 once per channel.
        int8x16_t _k0123456789x = vld1q_s8(kernel);
        int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
        int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
        int16x4_t _k0123 = vget_low_s16(_k_s16);
        int16x4_t _k4567 = vget_high_s16(_k_s16);
        int16x4_t _k8xxx = vget_low_s16(_kn_s16); // only lane [0] (k8) is used
#endif // __ARM_NEON
        // two output rows per iteration
        for (; i + 1 < outh; i += 2)
        {
#if __ARM_NEON
            int nn = outw >> 3;    // groups of 8 output columns
            int remain = outw & 7; // scalar tail
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    "ld1 {v4.8b, v5.8b}, [%3] \n"
                    "ld1 {v6.8b, v7.8b}, [%4] \n"
                    "ld1 {v8.8b, v9.8b}, [%5] \n"
                    "ld1 {v10.8b, v11.8b}, [%6] \n"
                    "add %3, %3, #8 \n"
                    "add %4, %4, #8 \n"
                    "add %5, %5, #8 \n"
                    "add %6, %6, #8 \n"
                    "ext v12.8b, v4.8b, v5.8b, #1 \n"
                    "ext v13.8b, v4.8b, v5.8b, #2 \n"
                    "ext v14.8b, v6.8b, v7.8b, #1 \n"
                    "ext v15.8b, v6.8b, v7.8b, #2 \n"
                    "ext v16.8b, v8.8b, v9.8b, #1 \n"
                    "ext v17.8b, v8.8b, v9.8b, #2 \n"
                    "ext v18.8b, v10.8b, v11.8b, #1 \n"
                    "ext v19.8b, v10.8b, v11.8b, #2 \n"
                    "sshll v4.8h, v4.8b, #0 \n"   // r00
                    "sshll v12.8h, v12.8b, #0 \n" // r01
                    "sshll v13.8h, v13.8b, #0 \n" // r02
                    "sshll v6.8h, v6.8b, #0 \n"   // r10
                    "sshll v14.8h, v14.8b, #0 \n" // r11
                    "sshll v15.8h, v15.8b, #0 \n" // r12
                    "sshll v8.8h, v8.8b, #0 \n"   // r20
                    "sshll v16.8h, v16.8b, #0 \n" // r21
                    "sshll v17.8h, v17.8b, #0 \n" // r22
                    "sshll v10.8h, v10.8b, #0 \n" // r30
                    "sshll v18.8h, v18.8b, #0 \n" // r31
                    "sshll v19.8h, v19.8b, #0 \n" // r32
                    // r0
                    "smull v20.4s, v4.4h, %14.h[0] \n" // (r00 - r07) * k00
                    "smull2 v21.4s, v4.8h, %14.h[0] \n"
                    "smull v22.4s, v12.4h, %14.h[1] \n" // (r01 - r08) * k01
                    "smull2 v23.4s, v12.8h, %14.h[1] \n"
                    "smull v24.4s, v13.4h, %14.h[2] \n" // (r02 - r09) * k02
                    "smull2 v25.4s, v13.8h, %14.h[2] \n"
                    // r1
                    "smull v26.4s, v6.4h, %14.h[0] \n" // (r10 - r17) * k00
                    "smull2 v27.4s, v6.8h, %14.h[0] \n"
                    "smull v28.4s, v14.4h, %14.h[1] \n" // (r11 - r18) * k01
                    "smull2 v29.4s, v14.8h, %14.h[1] \n"
                    "smull v30.4s, v15.4h, %14.h[2] \n" // (r12 - r19) * k02
                    "smull2 v31.4s, v15.8h, %14.h[2] \n"
                    "smlal v20.4s, v6.4h, %14.h[3] \n" // (r10 - r17) * k03
                    "smlal2 v21.4s, v6.8h, %14.h[3] \n"
                    "smlal v22.4s, v14.4h, %15.h[0] \n" // (r11 - r18) * k04
                    "smlal2 v23.4s, v14.8h, %15.h[0] \n"
                    "smlal v24.4s, v15.4h, %15.h[1] \n" // (r12 - r19) * k05
                    "smlal2 v25.4s, v15.8h, %15.h[1] \n"
                    // r2
                    "smlal v26.4s, v8.4h, %14.h[3] \n" // (r20 - r27) * k03
                    "smlal2 v27.4s, v8.8h, %14.h[3] \n"
                    "smlal v28.4s, v16.4h, %15.h[0] \n" // (r21 - r28) * k04
                    "smlal2 v29.4s, v16.8h, %15.h[0] \n"
                    "smlal v30.4s, v17.4h, %15.h[1] \n" // (r22 - r29) * k05
                    "smlal2 v31.4s, v17.8h, %15.h[1] \n"
                    "smlal v20.4s, v8.4h, %15.h[2] \n" // (r20 - r27) * k06
                    "smlal2 v21.4s, v8.8h, %15.h[2] \n"
                    "smlal v22.4s, v16.4h, %15.h[3] \n" // (r21 - r28) * k07
                    "smlal2 v23.4s, v16.8h, %15.h[3] \n"
                    "smlal v24.4s, v17.4h, %16.h[0] \n" // (r22 - r29) * k08
                    "smlal2 v25.4s, v17.8h, %16.h[0] \n"
                    // r3
                    "smlal v26.4s, v10.4h, %15.h[2] \n" // (r30 - r37) * k06
                    "smlal2 v27.4s, v10.8h, %15.h[2] \n"
                    "smlal v28.4s, v18.4h, %15.h[3] \n" // (r31 - r38) * k07
                    "smlal2 v29.4s, v18.8h, %15.h[3] \n"
                    "smlal v30.4s, v19.4h, %16.h[0] \n" // (r32 - r39) * k08
                    "smlal2 v31.4s, v19.8h, %16.h[0] \n"
                    // add and save
                    "add v20.4s, v20.4s, v22.4s \n"
                    "add v21.4s, v21.4s, v23.4s \n"
                    "add v26.4s, v26.4s, v28.4s \n"
                    "add v27.4s, v27.4s, v29.4s \n"
                    "add v20.4s, v20.4s, v24.4s \n"
                    "add v21.4s, v21.4s, v25.4s \n"
                    "add v26.4s, v26.4s, v30.4s \n"
                    "add v27.4s, v27.4s, v31.4s \n"
                    // broadcast requant constants (float bit patterns arrive in GPRs)
                    "dup v4.4s, %w17 \n" // bias
                    "dup v5.4s, %w18 \n" // scale_in
                    "dup v6.4s, %w19 \n" // scale_out
                    // top_s32 -> top_f32
                    "scvtf v20.4s, v20.4s \n"
                    "scvtf v21.4s, v21.4s \n"
                    "scvtf v26.4s, v26.4s \n"
                    "scvtf v27.4s, v27.4s \n"
                    // top_f32 = top_f32 * scale_in
                    "fmul v20.4s, v20.4s, v5.4s \n"
                    "fmul v21.4s, v21.4s, v5.4s \n"
                    "fmul v26.4s, v26.4s, v5.4s \n"
                    "fmul v27.4s, v27.4s, v5.4s \n"
                    // top_f32 = top_f32 + bias
                    "fadd v20.4s, v20.4s, v4.4s \n"
                    "fadd v21.4s, v21.4s, v4.4s \n"
                    "fadd v26.4s, v26.4s, v4.4s \n"
                    "fadd v27.4s, v27.4s, v4.4s \n"
                    // top_f32 = top_f32 * scale_out
                    "fmul v20.4s, v20.4s, v6.4s \n"
                    "fmul v21.4s, v21.4s, v6.4s \n"
                    "fmul v26.4s, v26.4s, v6.4s \n"
                    "fmul v27.4s, v27.4s, v6.4s \n"
                    // top_f32 -> top_s32 (round to nearest, ties away)
                    "fcvtas v20.4s, v20.4s \n"
                    "fcvtas v21.4s, v21.4s \n"
                    "fcvtas v26.4s, v26.4s \n"
                    "fcvtas v27.4s, v27.4s \n"
                    // top_s32 -> top_s16
                    "sqxtn v7.4h, v20.4s \n"
                    "sqxtn v9.4h, v26.4s \n"
                    "sqxtn2 v7.8h, v21.4s \n"
                    "sqxtn2 v9.8h, v27.4s \n"
                    // top_s16 -> top_s8
                    "sqxtn v8.8b, v7.8h \n"
                    "sqxtn v10.8b, v9.8h \n"
                    // save top_s8
                    "st1 {v8.8b}, [%1], #8 \n"
                    "st1 {v10.8b}, [%2], #8 \n"
                    "subs %w0, %w0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn),       // %0
                    "=r"(outptr0),    // %1
                    "=r"(outptr0n),   // %2
                    "=r"(r0),         // %3
                    "=r"(r1),         // %4
                    "=r"(r2),         // %5
                    "=r"(r3)          // %6
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k0123),             // %14
                    "w"(_k4567),             // %15
                    "w"(_k8xxx),             // %16
                    "r"(bias0),              // %17
                    "r"(scale_requant_in),   // %18
                    "r"(scale_requant_out)   // %19
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    // r0
                    "vld1.s8 {d30-d31}, [%3] \n" // r0
                    "add %3, %3, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r00
                    "vmovl.s8 q5, d10 \n"  // r01
                    "vmovl.s8 q6, d12 \n"  // r02
                    // sum0
                    "vmull.s16 q7, d30, %P14[0] \n" // (r00 - r07) * k00
                    "vmull.s16 q8, d31, %P14[0] \n"
                    "vmull.s16 q9, d10, %P14[1] \n" // (r01 - r08) * k01
                    "vmull.s16 q10, d11, %P14[1] \n"
                    "vmlal.s16 q7, d12, %P14[2] \n" // (r02 - r09) * k02
                    "vmlal.s16 q8, d13, %P14[2] \n"
                    // r1
                    "vld1.s8 {d30-d31}, [%4] \n" // r1
                    "add %4, %4, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r10
                    "vmovl.s8 q5, d10 \n"  // r11
                    "vmovl.s8 q6, d12 \n"  // r12
                    // sum0
                    "vmlal.s16 q7, d30, %P14[3] \n" // (r10 - r17) * k03
                    "vmlal.s16 q8, d31, %P14[3] \n"
                    "vmlal.s16 q9, d10, %P15[0] \n" // (r11 - r18) * k04
                    "vmlal.s16 q10, d11, %P15[0] \n"
                    "vmlal.s16 q7, d12, %P15[1] \n" // (r12 - r19) * k05
                    "vmlal.s16 q8, d13, %P15[1] \n"
                    // sum1
                    "vmull.s16 q11, d30, %P14[0] \n" // (r10 - r17) * k00
                    "vmull.s16 q12, d31, %P14[0] \n"
                    "vmull.s16 q13, d10, %P14[1] \n" // (r11 - r18) * k01
                    "vmull.s16 q14, d11, %P14[1] \n"
                    "vmlal.s16 q11, d12, %P14[2] \n" // (r12 - r19) * k02
                    "vmlal.s16 q12, d13, %P14[2] \n"
                    // r2
                    "vld1.s8 {d30-d31}, [%5] \n" // r2
                    "add %5, %5, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r20
                    "vmovl.s8 q5, d10 \n"  // r21
                    "vmovl.s8 q6, d12 \n"  // r22
                    // sum0
                    "vmlal.s16 q7, d30, %P15[2] \n" // (r20 - r27) * k06
                    "vmlal.s16 q8, d31, %P15[2] \n"
                    "vmlal.s16 q9, d10, %P15[3] \n" // (r21 - r28) * k07
                    "vmlal.s16 q10, d11, %P15[3] \n"
                    "vmlal.s16 q7, d12, %P16[0] \n" // (r22 - r29) * k08
                    "vmlal.s16 q8, d13, %P16[0] \n"
                    // sum1
                    "vmlal.s16 q11, d30, %P14[3] \n" // (r20 - r27) * k03
                    "vmlal.s16 q12, d31, %P14[3] \n"
                    "vmlal.s16 q13, d10, %P15[0] \n" // (r21 - r28) * k04
                    "vmlal.s16 q14, d11, %P15[0] \n"
                    "vmlal.s16 q11, d12, %P15[1] \n" // (r22 - r29) * k05
                    "vmlal.s16 q12, d13, %P15[1] \n"
                    // r3
                    "vld1.s8 {d30-d31}, [%6] \n" // r3
                    "add %6, %6, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r30
                    "vmovl.s8 q5, d10 \n"  // r31
                    "vmovl.s8 q6, d12 \n"  // r32
                    // sum1
                    "vmlal.s16 q11, d30, %P15[2] \n" // (r30 - r37) * k06
                    "vmlal.s16 q12, d31, %P15[2] \n"
                    "vmlal.s16 q13, d10, %P15[3] \n" // (r31 - r38) * k07
                    "vmlal.s16 q14, d11, %P15[3] \n"
                    "vmlal.s16 q11, d12, %P16[0] \n" // (r32 - r39) * k08
                    "vmlal.s16 q12, d13, %P16[0] \n"
                    "subs %0, %0, #1 \n"
                    // add and save
                    "vadd.s32 q7, q7, q9 \n"
                    "vadd.s32 q8, q8, q10 \n"
                    "vadd.s32 q11, q11, q13 \n"
                    "vadd.s32 q12, q12, q14 \n"
                    "vdup.f32 q13, %17 \n" // bias
                    "vdup.f32 q14, %18 \n" // scale_in
                    "vdup.f32 q15, %19 \n" // scale_out
                    // top_s32 -> top_f32
                    "vcvt.f32.s32 q7, q7 \n"
                    "vcvt.f32.s32 q8, q8 \n"
                    // top_f32 = top_f32 * scale_int
                    "vmul.f32 q0, q7, q14 \n"
                    "vmul.f32 q4, q8, q14 \n"
                    // top_f32 = top_f32 + bias
                    "vadd.f32 q0, q0, q13 \n"
                    "vadd.f32 q4, q4, q13 \n"
                    // top_f32 = top_f32 * scale_out
                    "vmul.f32 q0, q0, q15 \n"
                    "vmul.f32 q4, q4, q15 \n"
                    // top_f32 -> top_s32
                    "vcvtr.s32.f32 s0, s0 \n"
                    "vcvtr.s32.f32 s1, s1 \n"
                    "vcvtr.s32.f32 s2, s2 \n"
                    "vcvtr.s32.f32 s3, s3 \n"
                    "vcvtr.s32.f32 s16, s16 \n"
                    "vcvtr.s32.f32 s17, s17 \n"
                    "vcvtr.s32.f32 s18, s18 \n"
                    "vcvtr.s32.f32 s19, s19 \n"
                    // top_s32 -> top_s16
                    "vqmovn.s32 d14, q0 \n"
                    "vqmovn.s32 d15, q4 \n"
                    // top_s16 -> top_s8
                    "vqmovn.s16 d14, q7 \n"
                    // save top_s8
                    "vst1.8 {d14}, [%1]! \n"
                    // top_s32 -> top_f32
                    "vcvt.f32.s32 q11, q11 \n"
                    "vcvt.f32.s32 q12, q12 \n"
                    // top_f32 = top_f32 * scale_int
                    "vmul.f32 q0, q11, q14 \n"
                    "vmul.f32 q4, q12, q14 \n"
                    // top_f32 = top_f32 + bias
                    "vadd.f32 q0, q0, q13 \n"
                    "vadd.f32 q4, q4, q13 \n"
                    // top_f32 = top_f32 * scale_out
                    "vmul.f32 q0, q0, q15 \n"
                    "vmul.f32 q4, q4, q15 \n"
                    // top_f32 -> top_s32
                    "vcvtr.s32.f32 s0, s0 \n"
                    "vcvtr.s32.f32 s1, s1 \n"
                    "vcvtr.s32.f32 s2, s2 \n"
                    "vcvtr.s32.f32 s3, s3 \n"
                    "vcvtr.s32.f32 s16, s16 \n"
                    "vcvtr.s32.f32 s17, s17 \n"
                    "vcvtr.s32.f32 s18, s18 \n"
                    "vcvtr.s32.f32 s19, s19 \n"
                    // top_s32 -> top_s16
                    "vqmovn.s32 d14, q0 \n"
                    "vqmovn.s32 d15, q4 \n"
                    // top_s16 -> top_s8
                    "vqmovn.s16 d14, q7 \n"
                    // save top_s8
                    "vst1.8 {d14}, [%2]! \n"
                    "bne 0b \n"
                    : "=r"(nn),       // %0
                    "=r"(outptr0),    // %1
                    "=r"(outptr0n),   // %2
                    "=r"(r0),         // %3
                    "=r"(r1),         // %4
                    "=r"(r2),         // %5
                    "=r"(r3)          // %6
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(outptr0n),
                    "3"(r0),
                    "4"(r1),
                    "5"(r2),
                    "6"(r3),
                    "w"(_k0123),             // %14
                    "w"(_k4567),             // %15
                    "w"(_k8xxx),             // %16
                    "r"(bias0),              // %17
                    "r"(scale_requant_in),   // %18
                    "r"(scale_requant_out)   // %19
                    : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: one output column of both rows per iteration
            for (; remain > 0; remain--)
            {
                // TODO NEON
                int sum0 = 0;
                int sum0n = 0;
                sum0 += (int)r0[0] * kernel[0];
                sum0 += (int)r0[1] * kernel[1];
                sum0 += (int)r0[2] * kernel[2];
                sum0 += (int)r1[0] * kernel[3];
                sum0 += (int)r1[1] * kernel[4];
                sum0 += (int)r1[2] * kernel[5];
                sum0 += (int)r2[0] * kernel[6];
                sum0 += (int)r2[1] * kernel[7];
                sum0 += (int)r2[2] * kernel[8];
                // second row reuses rows r1..r3 with the same taps
                sum0n += (int)r1[0] * kernel[0];
                sum0n += (int)r1[1] * kernel[1];
                sum0n += (int)r1[2] * kernel[2];
                sum0n += (int)r2[0] * kernel[3];
                sum0n += (int)r2[1] * kernel[4];
                sum0n += (int)r2[2] * kernel[5];
                sum0n += (int)r3[0] * kernel[6];
                sum0n += (int)r3[1] * kernel[7];
                sum0n += (int)r3[2] * kernel[8];
                *outptr0 = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
                *outptr0n = float2int8(((float)sum0n * scale_requant_in + bias0) * scale_requant_out);
                r0++;
                r1++;
                r2++;
                r3++;
                outptr0++;
                outptr0n++;
            }
            // skip the 2-pixel border and the extra consumed row pair
            r0 += 2 + w;
            r1 += 2 + w;
            r2 += 2 + w;
            r3 += 2 + w;
            outptr0 += outw;
            outptr0n += outw;
        }
        // trailing single output row (odd outh)
        for (; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 3;
            int remain = outw & 7;
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    // hoist requant constants out of the loop
                    "dup v26.4s, %w13 \n"
                    "dup v27.4s, %w14 \n"
                    "dup v28.4s, %w15 \n"
                    "0: \n"
                    "ld1 {v4.8b, v5.8b}, [%2] \n"
                    "ld1 {v6.8b, v7.8b}, [%3] \n"
                    "ld1 {v8.8b, v9.8b}, [%4] \n"
                    "add %2, %2, #8 \n"
                    "add %3, %3, #8 \n"
                    "add %4, %4, #8 \n"
                    "ext v12.8b, v4.8b, v5.8b, #1 \n"
                    "ext v13.8b, v4.8b, v5.8b, #2 \n"
                    "ext v14.8b, v6.8b, v7.8b, #1 \n"
                    "ext v15.8b, v6.8b, v7.8b, #2 \n"
                    "ext v16.8b, v8.8b, v9.8b, #1 \n"
                    "ext v17.8b, v8.8b, v9.8b, #2 \n"
                    "sshll v4.8h, v4.8b, #0 \n"   // r00
                    "sshll v12.8h, v12.8b, #0 \n" // r01
                    "sshll v13.8h, v13.8b, #0 \n" // r02
                    "sshll v6.8h, v6.8b, #0 \n"   // r10
                    "sshll v14.8h, v14.8b, #0 \n" // r11
                    "sshll v15.8h, v15.8b, #0 \n" // r12
                    "sshll v8.8h, v8.8b, #0 \n"   // r20
                    "sshll v16.8h, v16.8b, #0 \n" // r21
                    "sshll v17.8h, v17.8b, #0 \n" // r22
                    // r0
                    "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
                    "smull2 v21.4s, v4.8h, %10.h[0] \n"
                    "smull v22.4s, v12.4h, %10.h[1] \n" // (r01 - r08) * k01
                    "smull2 v23.4s, v12.8h, %10.h[1] \n"
                    "smull v24.4s, v13.4h, %10.h[2] \n" // (r02 - r09) * k02
                    "smull2 v25.4s, v13.8h, %10.h[2] \n"
                    // r1
                    "smlal v20.4s, v6.4h, %10.h[3] \n" // (r10 - r17) * k03
                    "smlal2 v21.4s, v6.8h, %10.h[3] \n"
                    "smlal v22.4s, v14.4h, %11.h[0] \n" // (r11 - r18) * k04
                    "smlal2 v23.4s, v14.8h, %11.h[0] \n"
                    "smlal v24.4s, v15.4h, %11.h[1] \n" // (r12 - r19) * k05
                    "smlal2 v25.4s, v15.8h, %11.h[1] \n"
                    // r2
                    "smlal v20.4s, v8.4h, %11.h[2] \n" // (r20 - r27) * k06
                    "smlal2 v21.4s, v8.8h, %11.h[2] \n"
                    "smlal v22.4s, v16.4h, %11.h[3] \n" // (r21 - r28) * k07
                    "smlal2 v23.4s, v16.8h, %11.h[3] \n"
                    "smlal v24.4s, v17.4h, %12.h[0] \n" // (r22 - r29) * k08
                    "smlal2 v25.4s, v17.8h, %12.h[0] \n"
                    // add and save
                    "add v20.4s, v20.4s, v22.4s \n"
                    "add v21.4s, v21.4s, v23.4s \n"
                    "add v20.4s, v20.4s, v24.4s \n"
                    "add v21.4s, v21.4s, v25.4s \n"
                    // top_s32 -> top_f32
                    "scvtf v20.4s, v20.4s \n"
                    "scvtf v21.4s, v21.4s \n"
                    // top_f32 = top_f32 * scale_in
                    "fmul v20.4s, v20.4s, v27.4s \n"
                    "fmul v21.4s, v21.4s, v27.4s \n"
                    // top_f32 = top_f32 + bias
                    "fadd v20.4s, v20.4s, v26.4s \n"
                    "fadd v21.4s, v21.4s, v26.4s \n"
                    // top_f32 = top_f32 * scale_out
                    "fmul v20.4s, v20.4s, v28.4s \n"
                    "fmul v21.4s, v21.4s, v28.4s \n"
                    // top_f32 -> top_s32
                    "fcvtas v20.4s, v20.4s \n"
                    "fcvtas v21.4s, v21.4s \n"
                    // top_s32 -> top_s16
                    "sqxtn v7.4h, v20.4s \n"
                    "sqxtn2 v7.8h, v21.4s \n"
                    // top_s16 -> top_s8
                    "sqxtn v8.8b, v7.8h \n"
                    // save top_s8
                    "st1 {v8.8b}, [%1], #8 \n"
                    "subs %w0, %w0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),  // %1
                    "=r"(r0),       // %2
                    "=r"(r1),       // %3
                    "=r"(r2)        // %4
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),             // %10
                    "w"(_k4567),             // %11
                    "w"(_k8xxx),             // %12
                    "r"(bias0),              // %13
                    "r"(scale_requant_in),   // %14
                    "r"(scale_requant_out)   // %15
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    // r0
                    "vld1.s8 {d30-d31}, [%2] \n" // r0
                    "add %2, %2, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r00
                    "vmovl.s8 q5, d10 \n"  // r01
                    "vmovl.s8 q6, d12 \n"  // r02
                    // sum0
                    "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
                    "vmull.s16 q8, d31, %P10[0] \n"
                    "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
                    "vmull.s16 q10, d11, %P10[1] \n"
                    "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
                    "vmlal.s16 q8, d13, %P10[2] \n"
                    // r1
                    "vld1.s8 {d30-d31}, [%3] \n" // r1
                    "add %3, %3, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r10
                    "vmovl.s8 q5, d10 \n"  // r11
                    "vmovl.s8 q6, d12 \n"  // r12
                    // sum0
                    "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
                    "vmlal.s16 q8, d31, %P10[3] \n"
                    "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
                    "vmlal.s16 q10, d11, %P11[0] \n"
                    "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
                    "vmlal.s16 q8, d13, %P11[1] \n"
                    // r2
                    "vld1.s8 {d30-d31}, [%4] \n" // r2
                    "add %4, %4, #8 \n"
                    "vext.s8 d10, d30, d31, #1 \n"
                    "vext.s8 d12, d30, d31, #2 \n"
                    "vmovl.s8 q15, d30 \n" // r20
                    "vmovl.s8 q5, d10 \n"  // r21
                    "vmovl.s8 q6, d12 \n"  // r22
                    // sum0
                    "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
                    "vmlal.s16 q8, d31, %P11[2] \n"
                    "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
                    "vmlal.s16 q10, d11, %P11[3] \n"
                    "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
                    "vmlal.s16 q8, d13, %P12[0] \n"
                    "subs %0, %0, #1 \n"
                    // add and save
                    "vadd.s32 q7, q7, q9 \n"
                    "vadd.s32 q8, q8, q10 \n"
                    "vdup.f32 q13, %13 \n" // bias
                    "vdup.f32 q14, %14 \n" // scale_in
                    "vdup.f32 q15, %15 \n" // scale_out
                    // top_s32 -> top_f32
                    "vcvt.f32.s32 q7, q7 \n"
                    "vcvt.f32.s32 q8, q8 \n"
                    // top_f32 = top_f32 * scale_int
                    "vmul.f32 q0, q7, q14 \n"
                    "vmul.f32 q4, q8, q14 \n"
                    // top_f32 = top_f32 + bias
                    "vadd.f32 q0, q0, q13 \n"
                    "vadd.f32 q4, q4, q13 \n"
                    // top_f32 = top_f32 * scale_out
                    "vmul.f32 q0, q0, q15 \n"
                    "vmul.f32 q4, q4, q15 \n"
                    // top_f32 -> top_s32
                    "vcvtr.s32.f32 s0, s0 \n"
                    "vcvtr.s32.f32 s1, s1 \n"
                    "vcvtr.s32.f32 s2, s2 \n"
                    "vcvtr.s32.f32 s3, s3 \n"
                    "vcvtr.s32.f32 s16, s16 \n"
                    "vcvtr.s32.f32 s17, s17 \n"
                    "vcvtr.s32.f32 s18, s18 \n"
                    "vcvtr.s32.f32 s19, s19 \n"
                    // top_s32 -> top_s16
                    "vqmovn.s32 d14, q0 \n"
                    "vqmovn.s32 d15, q4 \n"
                    // top_s16 -> top_s8
                    "vqmovn.s16 d14, q7 \n"
                    // save top_s8
                    "vst1.8 {d14}, [%1]! \n"
                    "bne 0b \n"
                    : "=r"(nn),     // %0
                    "=r"(outptr0),  // %1
                    "=r"(r0),       // %2
                    "=r"(r1),       // %3
                    "=r"(r2)        // %4
                    : "0"(nn),
                    "1"(outptr0),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),             // %10
                    "w"(_k4567),             // %11
                    "w"(_k8xxx),             // %12
                    "r"(bias0),              // %13
                    "r"(scale_requant_in),   // %14
                    "r"(scale_requant_out)   // %15
                    : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            for (; remain > 0; remain--)
            {
                int sum = 0;
                sum += (int)r0[0] * kernel[0];
                sum += (int)r0[1] * kernel[1];
                sum += (int)r0[2] * kernel[2];
                sum += (int)r1[0] * kernel[3];
                sum += (int)r1[1] * kernel[4];
                sum += (int)r1[2] * kernel[5];
                sum += (int)r2[0] * kernel[6];
                sum += (int)r2[1] * kernel[7];
                sum += (int)r2[2] * kernel[8];
                *outptr0 = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
                r0++;
                r1++;
                r2++;
                outptr0++;
            }
            // skip the 2-pixel row border
            r0 += 2;
            r1 += 2;
            r2 += 2;
        }
    }
}
// Depthwise 3x3 stride-2 int8 convolution with fused requantization.
// Per output channel p: top_s8 = saturate(round((sum_s32 * scale_in + bias) * scale_out)),
// where scale_in = scales_requant[2p] and scale_out = scales_requant[2p+1].
// NEON paths de-interleave even/odd input columns (vld2/ld2) to realize the
// stride-2 access and process 8 output columns per asm iteration.
static void convdw3x3s2_int8_requant_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;
    const float* bias = _bias;
    // bytes to advance each input row pointer past the consumed row pair
    const int tailstep = w - 2 * outw + w;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        Mat out = top_blob.channel(p);
        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2 * p];
        const float scale_requant_out = scales_requant[2 * p + 1];
        // 9 kernel taps for this channel, stored contiguously
        const signed char* kernel = (const signed char*)_kernel + p * 9;
        signed char* outptr = out;
        const signed char* img = bottom_blob.channel(p);
        const signed char* r0 = img;
        const signed char* r1 = img + w;
        const signed char* r2 = img + w * 2;
        int i = 0;
#if __ARM_NEON
        // NOTE(review): vld1q_s8 reads 16 bytes but only 9 taps exist for the
        // last channel — relies on the kernel Mat's allocation padding; only
        // lanes 0..8 are consumed below. Widen taps to s16 once per channel.
        int8x16_t _k0123456789x = vld1q_s8(kernel);
        int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
        int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
        int16x4_t _k0123 = vget_low_s16(_k_s16);
        int16x4_t _k4567 = vget_high_s16(_k_s16);
        int16x4_t _k8xxx = vget_low_s16(_kn_s16); // only lane [0] (k8) is used
#endif // __ARM_NEON
        for (; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 3;    // groups of 8 output columns
            int remain = outw & 7; // scalar tail
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
                asm volatile(
                    // hoist requant constants out of the loop
                    "dup v26.4s, %w13 \n"
                    "dup v27.4s, %w14 \n"
                    "dup v28.4s, %w15 \n"
                    "0: \n"
                    // ld2 splits even columns (v4/v8/v12) from odd (v5/v9/v13)
                    "ld2 {v4.8b, v5.8b}, [%2], #16 \n"
                    "ld2 {v6.8b, v7.8b}, [%2] \n"
                    "ld2 {v8.8b, v9.8b}, [%3], #16 \n"
                    "ld2 {v10.8b, v11.8b}, [%3] \n"
                    "ld2 {v12.8b, v13.8b}, [%4], #16 \n"
                    "ld2 {v14.8b, v15.8b}, [%4] \n"
                    "ext v6.8b, v4.8b, v6.8b, #1 \n"
                    "ext v10.8b, v8.8b, v10.8b, #1 \n"
                    "ext v14.8b, v12.8b, v14.8b, #1 \n"
                    "sshll v4.8h, v4.8b, #0 \n"   // r00
                    "sshll v5.8h, v5.8b, #0 \n"   // r01
                    "sshll v6.8h, v6.8b, #0 \n"   // r02
                    "sshll v8.8h, v8.8b, #0 \n"   // r10
                    "sshll v9.8h, v9.8b, #0 \n"   // r11
                    "sshll v10.8h, v10.8b, #0 \n" // r12
                    "sshll v12.8h, v12.8b, #0 \n" // r20
                    "sshll v13.8h, v13.8b, #0 \n" // r21
                    "sshll v14.8h, v14.8b, #0 \n" // r22
                    // r0
                    "smull v20.4s, v4.4h, %10.h[0] \n" // (r00 - r07) * k00
                    "smull2 v21.4s, v4.8h, %10.h[0] \n"
                    "smull v22.4s, v5.4h, %10.h[1] \n" // (r01 - r08) * k01
                    "smull2 v23.4s, v5.8h, %10.h[1] \n"
                    "smull v24.4s, v6.4h, %10.h[2] \n" // (r02 - r09) * k02
                    "smull2 v25.4s, v6.8h, %10.h[2] \n"
                    // r1
                    "smlal v20.4s, v8.4h, %10.h[3] \n" // (r10 - r17) * k03
                    "smlal2 v21.4s, v8.8h, %10.h[3] \n"
                    "smlal v22.4s, v9.4h, %11.h[0] \n" // (r11 - r18) * k04
                    "smlal2 v23.4s, v9.8h, %11.h[0] \n"
                    "smlal v24.4s, v10.4h, %11.h[1] \n" // (r12 - r19) * k05
                    "smlal2 v25.4s, v10.8h, %11.h[1] \n"
                    // r2
                    "smlal v20.4s, v12.4h, %11.h[2] \n" // (r20 - r27) * k06
                    "smlal2 v21.4s, v12.8h, %11.h[2] \n"
                    "smlal v22.4s, v13.4h, %11.h[3] \n" // (r21 - r28) * k07
                    "smlal2 v23.4s, v13.8h, %11.h[3] \n"
                    "smlal v24.4s, v14.4h, %12.h[0] \n" // (r22 - r29) * k08
                    "smlal2 v25.4s, v14.8h, %12.h[0] \n"
                    // add and save
                    "add v20.4s, v20.4s, v22.4s \n"
                    "add v21.4s, v21.4s, v23.4s \n"
                    "add v20.4s, v20.4s, v24.4s \n"
                    "add v21.4s, v21.4s, v25.4s \n"
                    // top_s32 -> top_f32
                    "scvtf v20.4s, v20.4s \n"
                    "scvtf v21.4s, v21.4s \n"
                    // top_f32 = top_f32 * scale_in
                    "fmul v20.4s, v20.4s, v27.4s \n"
                    "fmul v21.4s, v21.4s, v27.4s \n"
                    // top_f32 = top_f32 + bias
                    "fadd v20.4s, v20.4s, v26.4s \n"
                    "fadd v21.4s, v21.4s, v26.4s \n"
                    // top_f32 = top_f32 * scale_out
                    "fmul v20.4s, v20.4s, v28.4s \n"
                    "fmul v21.4s, v21.4s, v28.4s \n"
                    // top_f32 -> top_s32 (round to nearest, ties away)
                    "fcvtas v20.4s, v20.4s \n"
                    "fcvtas v21.4s, v21.4s \n"
                    // top_s32 -> top_s16
                    "sqxtn v7.4h, v20.4s \n"
                    "sqxtn2 v7.8h, v21.4s \n"
                    // top_s16 -> top_s8
                    "sqxtn v8.8b, v7.8h \n"
                    // save top_s8
                    "st1 {v8.8b}, [%1], #8 \n"
                    "subs %w0, %w0, #1 \n"
                    "bne 0b \n"
                    : "=r"(nn),    // %0
                    "=r"(outptr),  // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2)       // %4
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),             // %10
                    "w"(_k4567),             // %11
                    "w"(_k8xxx),             // %12
                    "r"(bias0),              // %13
                    "r"(scale_requant_in),   // %14
                    "r"(scale_requant_out)   // %15
                    : "cc", "memory", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
            }
#else
            if (nn > 0)
            {
                asm volatile(
                    "0: \n"
                    // r0
                    "vld2.s8 {d30-d31}, [%2]! \n" // r0
                    "vld2.s8 {d10-d11}, [%2] \n"
                    "vext.s8 d12, d30, d10, #1 \n"
                    "vmovl.s8 q5, d31 \n"  // r01
                    "vmovl.s8 q15, d30 \n" // r00
                    "vmovl.s8 q6, d12 \n"  // r02
                    // sum0
                    "vmull.s16 q7, d30, %P10[0] \n" // (r00 - r07) * k00
                    "vmull.s16 q8, d31, %P10[0] \n"
                    "vmull.s16 q9, d10, %P10[1] \n" // (r01 - r08) * k01
                    "vmull.s16 q10, d11, %P10[1] \n"
                    "vmlal.s16 q7, d12, %P10[2] \n" // (r02 - r09) * k02
                    "vmlal.s16 q8, d13, %P10[2] \n"
                    // r1
                    "vld2.s8 {d30-d31}, [%3]! \n" // r1
                    "vld2.s8 {d10-d11}, [%3] \n"
                    "vext.s8 d12, d30, d10, #1 \n"
                    "vmovl.s8 q5, d31 \n"  // r11
                    "vmovl.s8 q15, d30 \n" // r10
                    "vmovl.s8 q6, d12 \n"  // r12
                    // sum0
                    "vmlal.s16 q7, d30, %P10[3] \n" // (r10 - r17) * k03
                    "vmlal.s16 q8, d31, %P10[3] \n"
                    "vmlal.s16 q9, d10, %P11[0] \n" // (r11 - r18) * k04
                    "vmlal.s16 q10, d11, %P11[0] \n"
                    "vmlal.s16 q7, d12, %P11[1] \n" // (r12 - r19) * k05
                    "vmlal.s16 q8, d13, %P11[1] \n"
                    // r2
                    "vld2.s8 {d30-d31}, [%4]! \n" // r2
                    "vld2.s8 {d10-d11}, [%4] \n"
                    "vext.s8 d12, d30, d10, #1 \n"
                    "vmovl.s8 q5, d31 \n"  // r21
                    "vmovl.s8 q15, d30 \n" // r20
                    "vmovl.s8 q6, d12 \n"  // r22
                    // sum0
                    "vmlal.s16 q7, d30, %P11[2] \n" // (r20 - r27) * k06
                    "vmlal.s16 q8, d31, %P11[2] \n"
                    "vmlal.s16 q9, d10, %P11[3] \n" // (r21 - r28) * k07
                    "vmlal.s16 q10, d11, %P11[3] \n"
                    "vmlal.s16 q7, d12, %P12[0] \n" // (r22 - r29) * k08
                    "vmlal.s16 q8, d13, %P12[0] \n"
                    "subs %0, %0, #1 \n"
                    // add and save
                    "vadd.s32 q7, q7, q9 \n"
                    "vadd.s32 q8, q8, q10 \n"
                    "vdup.f32 q11, %13 \n" // bias
                    "vdup.f32 q12, %14 \n" // scale_in
                    "vdup.f32 q13, %15 \n" // scale_out
                    // top_s32 -> top_f32
                    "vcvt.f32.s32 q7, q7 \n"
                    "vcvt.f32.s32 q8, q8 \n"
                    // top_f32 = top_f32 * scale_int
                    "vmul.f32 q0, q7, q12 \n"
                    "vmul.f32 q4, q8, q12 \n"
                    // top_f32 = top_f32 + bias
                    "vadd.f32 q0, q0, q11 \n"
                    "vadd.f32 q4, q4, q11 \n"
                    // top_f32 = top_f32 * scale_out
                    "vmul.f32 q0, q0, q13 \n"
                    "vmul.f32 q4, q4, q13 \n"
                    // top_f32 -> top_s32
                    "vcvtr.s32.f32 s0, s0 \n"
                    "vcvtr.s32.f32 s1, s1 \n"
                    "vcvtr.s32.f32 s2, s2 \n"
                    "vcvtr.s32.f32 s3, s3 \n"
                    "vcvtr.s32.f32 s16, s16 \n"
                    "vcvtr.s32.f32 s17, s17 \n"
                    "vcvtr.s32.f32 s18, s18 \n"
                    "vcvtr.s32.f32 s19, s19 \n"
                    // top_s32 -> top_s16
                    "vqmovn.s32 d14, q0 \n"
                    "vqmovn.s32 d15, q4 \n"
                    // top_s16 -> top_s8
                    "vqmovn.s16 d14, q7 \n"
                    // save top_s8
                    "vst1.8 {d14}, [%1]! \n"
                    "bne 0b \n"
                    : "=r"(nn),    // %0
                    "=r"(outptr),  // %1
                    "=r"(r0),      // %2
                    "=r"(r1),      // %3
                    "=r"(r2)       // %4
                    : "0"(nn),
                    "1"(outptr),
                    "2"(r0),
                    "3"(r1),
                    "4"(r2),
                    "w"(_k0123),             // %10
                    "w"(_k4567),             // %11
                    "w"(_k8xxx),             // %12
                    "r"(bias0),              // %13
                    "r"(scale_requant_in),   // %14
                    "r"(scale_requant_out)   // %15
                    : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: one output column per iteration, input advances by 2
            for (; remain > 0; remain--)
            {
                int sum = 0;
                sum += (int)r0[0] * kernel[0];
                sum += (int)r0[1] * kernel[1];
                sum += (int)r0[2] * kernel[2];
                sum += (int)r1[0] * kernel[3];
                sum += (int)r1[1] * kernel[4];
                sum += (int)r1[2] * kernel[5];
                sum += (int)r2[0] * kernel[6];
                sum += (int)r2[1] * kernel[7];
                sum += (int)r2[2] * kernel[8];
                *outptr = float2int8(((float)sum * scale_requant_in + bias0) * scale_requant_out);
                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
|
bmesh_operators.c | /*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Contributor(s): Joseph Eagar, Geoffrey Bantle, Campbell Barton
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/bmesh/intern/bmesh_operators.c
* \ingroup bmesh
*
* BMesh operator access.
*/
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_string.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_mempool.h"
#include "BLI_listbase.h"
#include "BLT_translation.h"
#include "bmesh.h"
#include "intern/bmesh_private.h"
/* forward declarations */
static void bmo_flag_layer_alloc(BMesh *bm);
static void bmo_flag_layer_free(BMesh *bm);
static void bmo_flag_layer_clear(BMesh *bm);
static int bmo_name_to_slotcode(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier);
static int bmo_name_to_slotcode_check(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier);
/* Human-readable messages for the BMesh operator error codes.
 * Index 0 is NULL (no error); the static assert below keeps the
 * table in sync with the BMERR_* enum count. */
static const char *bmo_error_messages[] = {
	NULL,
	N_("Self intersection error"),
	N_("Could not dissolve vert"),
	N_("Could not connect vertices"),
	N_("Could not traverse mesh"),
	N_("Could not dissolve faces"),
	N_("Tessellation error"),
	N_("Cannot deal with non-manifold geometry"),
	N_("Invalid selection"),
	N_("Internal mesh error"),
};
BLI_STATIC_ASSERT(ARRAY_SIZE(bmo_error_messages) + 1 == BMERR_TOTAL, "message mismatch");
/* operator slot type information - size of one element of the type given.
 * Indexed by the BMO_OP_SLOT_* enum value; zero entries are either the
 * sentinel or unused slot-type numbers. */
const int BMO_OPSLOT_TYPEINFO[BMO_OP_SLOT_TOTAL_TYPES] = {
	0,                  /*  0: BMO_OP_SLOT_SENTINEL */
	sizeof(int),        /*  1: BMO_OP_SLOT_BOOL */
	sizeof(int),        /*  2: BMO_OP_SLOT_INT */
	sizeof(float),      /*  3: BMO_OP_SLOT_FLT */
	sizeof(void *),     /*  4: BMO_OP_SLOT_PNT */
	sizeof(void *),     /*  5: BMO_OP_SLOT_PNT */
	0,                  /*  6: unused */
	0,                  /*  7: unused */
	sizeof(float) * 3,  /*  8: BMO_OP_SLOT_VEC */
	sizeof(void *),     /*  9: BMO_OP_SLOT_ELEMENT_BUF */
	sizeof(void *)      /* 10: BMO_OP_SLOT_MAPPING */
};
/* Dummy slot so there is something to return when slot name lookup fails */
// static BMOpSlot BMOpEmptySlot = {0};
/* Set an operator-level flag bit on \a op (the mesh argument is unused). */
void BMO_op_flag_enable(BMesh *UNUSED(bm), BMOperator *op, const int op_flag)
{
	op->flag = (op->flag | op_flag);
}
/* Clear an operator-level flag bit on \a op (the mesh argument is unused). */
void BMO_op_flag_disable(BMesh *UNUSED(bm), BMOperator *op, const int op_flag)
{
	op->flag = (op->flag & ~op_flag);
}
/**
 * \brief BMESH OPSTACK PUSH
 *
 * Moves one level down the operator stack, ensuring a tool-flag layer
 * exists for the new level (allocated, or cleared for reuse).
 */
void BMO_push(BMesh *bm, BMOperator *UNUSED(op))
{
	bm->toolflag_index++;

	BLI_assert(bm->totflags > 0);

	if (bm->toolflag_index <= 0) {
		/* reuse the existing layer */
		bmo_flag_layer_clear(bm);
	}
	else {
		/* add a fresh flag layer for this nesting level */
		bmo_flag_layer_alloc(bm);
	}
}
/**
 * \brief BMESH OPSTACK POP
 *
 * Moves one level up the operator stack, releasing the nested
 * tool-flag layer when one was allocated for this level.
 *
 * BMESH_TODO: investigate NOT freeing flag layers.
 */
void BMO_pop(BMesh *bm)
{
	const bool free_layer = (bm->toolflag_index > 0);

	if (free_layer) {
		bmo_flag_layer_free(bm);
	}

	bm->toolflag_index--;
}
/* use for both slot_types_in and slot_types_out */
static void bmo_op_slots_init(const BMOSlotType *slot_types, BMOpSlot *slot_args)
{
	uint i;
	for (i = 0; slot_types[i].type; i++) {
		BMOpSlot *slot = &slot_args[i];
		slot->slot_name = slot_types[i].name;
		slot->slot_type = slot_types[i].type;
		slot->slot_subtype = slot_types[i].subtype;
		// slot->index = i; // UNUSED

		/* mapping slots are the only kind that own storage up-front */
		if (slot->slot_type == BMO_OP_SLOT_MAPPING) {
			slot->data.ghash = BLI_ghash_ptr_new("bmesh slot map hash");
		}
	}
}
/* counterpart of bmo_op_slots_init: release per-slot storage */
static void bmo_op_slots_free(const BMOSlotType *slot_types, BMOpSlot *slot_args)
{
	uint i;
	for (i = 0; slot_types[i].type; i++) {
		BMOpSlot *slot = &slot_args[i];
		if (slot->slot_type == BMO_OP_SLOT_MAPPING) {
			BLI_ghash_free(slot->data.ghash, NULL, NULL);
		}
	}
}
/**
 * \brief BMESH OPSTACK INIT OP
 *
 * Initializes an operator structure to a certain type
 */
void BMO_op_init(BMesh *bm, BMOperator *op, const int flag, const char *opname)
{
	int opcode = BMO_opcode_from_opname(opname);

#ifdef DEBUG
	BM_ELEM_INDEX_VALIDATE(bm, "pre bmo", opname);
#else
	(void)bm;
#endif

	/* unknown opname: fall back to operator 0 rather than crashing */
	if (opcode == -1) {
		opcode = 0; /* error!, already printed, have a better way to handle this? */
	}

	memset(op, 0, sizeof(BMOperator));
	op->type = opcode;
	op->type_flag = bmo_opdefines[opcode]->type_flag;
	op->flag = flag;

	/* initialize the operator slot types */
	bmo_op_slots_init(bmo_opdefines[opcode]->slot_types_in, op->slots_in);
	bmo_op_slots_init(bmo_opdefines[opcode]->slot_types_out, op->slots_out);

	/* callback */
	op->exec = bmo_opdefines[opcode]->exec;

	/* memarena, used for operator's slot buffers (calloc so buffers start zeroed) */
	op->arena = BLI_memarena_new(BLI_MEMARENA_STD_BUFSIZE, __func__);
	BLI_memarena_use_calloc(op->arena);
}
/**
 * \brief BMESH OPSTACK EXEC OP
 *
 * Executes a passed in operator.
 *
 * This handles the allocation and freeing of temporary flag
 * layers and starting/stopping the modeling loop.
 * Can be called from other operators exec callbacks as well.
 */
void BMO_op_exec(BMesh *bm, BMOperator *op)
{
  /* allocate tool flags on demand */
  BM_mesh_elem_toolflags_ensure(bm);

  BMO_push(bm, op);

  /* only the outermost operator on the stack brackets the edit session */
  if (bm->toolflag_index == 1) {
    bmesh_edit_begin(bm, op->type_flag);
  }

  op->exec(bm, op);

  if (bm->toolflag_index == 1) {
    bmesh_edit_end(bm, op->type_flag);
  }

  BMO_pop(bm);
}
/**
 * \brief BMESH OPSTACK FINISH OP
 *
 * Does housekeeping chores related to finishing up an operator.
 *
 * Frees per-slot storage for both slot arrays and the operator's arena.
 * After this the operator must not be used again.
 */
void BMO_op_finish(BMesh *bm, BMOperator *op)
{
  bmo_op_slots_free(bmo_opdefines[op->type]->slot_types_in, op->slots_in);
  bmo_op_slots_free(bmo_opdefines[op->type]->slot_types_out, op->slots_out);

  BLI_memarena_free(op->arena);

#ifdef DEBUG
  BM_ELEM_INDEX_VALIDATE(bm, "post bmo", bmo_opdefines[op->type]->opname);

  /* avoid accidental re-use */
  /* poison the struct with 0xff so stale reads fail loudly in debug builds */
  memset(op, 0xff, sizeof(*op));
#else
  (void)bm;
#endif
}
/**
 * \brief BMESH OPSTACK HAS SLOT
 *
 * \return Success if the slot if found.
 */
bool BMO_slot_exists(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier)
{
  /* negative slot codes mean the name was not found */
  return (bmo_name_to_slotcode(slot_args, identifier) >= 0);
}
/**
 * \brief BMESH OPSTACK GET SLOT
 *
 * Returns a pointer to the slot of type 'slot_code'
 *
 * Looks the slot up by name; an unknown name is a programming error
 * (asserts in debug, deliberately returns NULL in release so the caller
 * crashes close to the source of the bug instead of silently continuing).
 */
BMOpSlot *BMO_slot_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier)
{
  int slot_code = bmo_name_to_slotcode_check(slot_args, identifier);

  if (UNLIKELY(slot_code < 0)) {
    //return &BMOpEmptySlot;
    BLI_assert(0);
    return NULL; /* better crash */
  }

  return &slot_args[slot_code];
}
/**
 * \brief BMESH OPSTACK COPY SLOT
 *
 * define used.
 * Copies data from one slot to another.
 *
 * For element buffers, elements whose type is not accepted by the destination
 * slot's subtype are filtered out.  For mappings, key/value pairs are inserted
 * into the destination ghash.  Other slot types are copied by value.
 */
void _bmo_slot_copy(
        BMOpSlot slot_args_src[BMO_OP_MAX_SLOTS], const char *slot_name_src,
        BMOpSlot slot_args_dst[BMO_OP_MAX_SLOTS], const char *slot_name_dst,
        struct MemArena *arena_dst)
{
  BMOpSlot *slot_src = BMO_slot_get(slot_args_src, slot_name_src);
  BMOpSlot *slot_dst = BMO_slot_get(slot_args_dst, slot_name_dst);

  if (slot_src == slot_dst)
    return;

  BLI_assert(slot_src->slot_type == slot_dst->slot_type);
  if (slot_src->slot_type != slot_dst->slot_type) {
    return;
  }

  if (slot_dst->slot_type == BMO_OP_SLOT_ELEMENT_BUF) {
    /* do buffer copy */
    slot_dst->data.buf = NULL;
    slot_dst->len = slot_src->len;
    if (slot_dst->len) {
      /* check dest has all flags enabled that the source has */
      const eBMOpSlotSubType_Elem src_elem_flag = (slot_src->slot_subtype.elem & BM_ALL_NOLOOP);
      const eBMOpSlotSubType_Elem dst_elem_flag = (slot_dst->slot_subtype.elem & BM_ALL_NOLOOP);

      if ((src_elem_flag | dst_elem_flag) == dst_elem_flag) {
        /* pass */
        /* destination accepts every source element type: plain copy below */
      }
      else {
        /* check types */
        /* first pass: count how many source elements the destination accepts,
         * so the exact buffer length is known before allocating */
        const uint tot = slot_src->len;
        uint i;
        uint out = 0;
        BMElem **ele_src = (BMElem **)slot_src->data.buf;
        for (i = 0; i < tot; i++, ele_src++) {
          if ((*ele_src)->head.htype & dst_elem_flag) {
            out++;
          }
        }
        if (out != tot) {
          slot_dst->len = out;
        }
      }

      if (slot_dst->len) {
        const int slot_alloc_size = BMO_OPSLOT_TYPEINFO[slot_dst->slot_type] * slot_dst->len;
        slot_dst->data.buf = BLI_memarena_alloc(arena_dst, slot_alloc_size);
        if (slot_src->len == slot_dst->len) {
          /* nothing was filtered: bulk copy */
          memcpy(slot_dst->data.buf, slot_src->data.buf, slot_alloc_size);
        }
        else {
          /* only copy compatible elements */
          const uint tot = slot_src->len;
          uint i;
          BMElem **ele_src = (BMElem **)slot_src->data.buf;
          BMElem **ele_dst = (BMElem **)slot_dst->data.buf;
          for (i = 0; i < tot; i++, ele_src++) {
            if ((*ele_src)->head.htype & dst_elem_flag) {
              *ele_dst = *ele_src;
              ele_dst++;
            }
          }
        }
      }
    }
  }
  else if (slot_dst->slot_type == BMO_OP_SLOT_MAPPING) {
    /* merge the source mapping into the destination ghash */
    GHashIterator gh_iter;
    GHASH_ITER (gh_iter, slot_src->data.ghash) {
      void *key = BLI_ghashIterator_getKey(&gh_iter);
      void *val = BLI_ghashIterator_getValue(&gh_iter);
      BLI_ghash_insert(slot_dst->data.ghash, key, val);
    }
  }
  else {
    /* scalar slot types: copy the whole data union */
    slot_dst->data = slot_src->data;
  }
}
/*
 * BMESH OPSTACK SET XXX
 *
 * Sets the value of a slot depending on it's type
 */

/* Store a float into a BMO_OP_SLOT_FLT slot; mismatched slot types are ignored. */
void BMO_slot_float_set(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const float f)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_FLT);
  if (slot->slot_type != BMO_OP_SLOT_FLT) {
    return;
  }

  slot->data.f = f;
}
/* Store an int into a BMO_OP_SLOT_INT slot; mismatched slot types are ignored. */
void BMO_slot_int_set(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const int i)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_INT);
  if (slot->slot_type != BMO_OP_SLOT_INT) {
    return;
  }

  slot->data.i = i;
}
/* Store a bool into a BMO_OP_SLOT_BOOL slot; mismatched slot types are ignored. */
void BMO_slot_bool_set(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const bool i)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_BOOL);
  if (slot->slot_type != BMO_OP_SLOT_BOOL) {
    return;
  }

  slot->data.i = i;
}
/* only supports square mats */
/* Stores a 3x3 or 4x4 matrix into a BMO_OP_SLOT_MAT slot.  The slot always
 * holds a full 4x4 matrix internally (3x3 input is promoted via copy_m4_m3),
 * hence slot->len is fixed at 4.  Other sizes zero the matrix with a warning. */
void BMO_slot_mat_set(BMOperator *op, BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const float *mat, int size)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAT);
  if (!(slot->slot_type == BMO_OP_SLOT_MAT))
    return;

  slot->len = 4;
  /* matrix storage lives in the operator's arena, freed with the operator */
  slot->data.p = BLI_memarena_alloc(op->arena, sizeof(float) * 4 * 4);

  if (size == 4) {
    copy_m4_m4(slot->data.p, (float (*)[4])mat);
  }
  else if (size == 3) {
    copy_m4_m3(slot->data.p, (float (*)[3])mat);
  }
  else {
    fprintf(stderr, "%s: invalid size argument %d (bmesh internal error)\n", __func__, size);

    zero_m4(slot->data.p);
  }
}
/* Read a matrix slot as 4x4; an unset slot yields the identity matrix. */
void BMO_slot_mat4_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, float r_mat[4][4])
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAT);
  if (slot->slot_type != BMO_OP_SLOT_MAT) {
    return;
  }

  if (slot->data.p == NULL) {
    /* no matrix was stored, fall back to identity */
    unit_m4(r_mat);
  }
  else {
    copy_m4_m4(r_mat, BMO_SLOT_AS_MATRIX(slot));
  }
}
/* Read a matrix slot as 3x3 (upper-left of the stored 4x4); unset yields identity. */
void BMO_slot_mat3_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, float r_mat[3][3])
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAT);
  if (slot->slot_type != BMO_OP_SLOT_MAT) {
    return;
  }

  if (slot->data.p == NULL) {
    /* no matrix was stored, fall back to identity */
    unit_m3(r_mat);
  }
  else {
    copy_m3_m4(r_mat, BMO_SLOT_AS_MATRIX(slot));
  }
}
/* Store a raw pointer into a BMO_OP_SLOT_PTR slot; mismatched slot types are ignored. */
void BMO_slot_ptr_set(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, void *p)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_PTR);
  if (slot->slot_type != BMO_OP_SLOT_PTR) {
    return;
  }

  slot->data.p = p;
}
/* Store a 3d vector into a BMO_OP_SLOT_VEC slot; mismatched slot types are ignored. */
void BMO_slot_vec_set(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const float vec[3])
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_VEC);
  if (slot->slot_type != BMO_OP_SLOT_VEC) {
    return;
  }

  copy_v3_v3(slot->data.vec, vec);
}
/* Read a BMO_OP_SLOT_FLT slot; returns 0.0f on slot-type mismatch. */
float BMO_slot_float_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_FLT);
  if (slot->slot_type != BMO_OP_SLOT_FLT) {
    return 0.0f;
  }

  return slot->data.f;
}
/* Read a BMO_OP_SLOT_INT slot; returns 0 on slot-type mismatch. */
int BMO_slot_int_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_INT);
  if (slot->slot_type != BMO_OP_SLOT_INT) {
    return 0;
  }

  return slot->data.i;
}
/* Read a BMO_OP_SLOT_BOOL slot; returns false on slot-type mismatch.
 *
 * Fix: the function is declared bool but returned the raw int `slot->data.i`
 * (and a literal `0` on mismatch); make both conversions explicit. */
bool BMO_slot_bool_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BLI_assert(slot->slot_type == BMO_OP_SLOT_BOOL);
  if (!(slot->slot_type == BMO_OP_SLOT_BOOL))
    return false;

  return (slot->data.i != 0);
}
/* if you want a copy of the elem buffer */
/* Returns a MEM_mallocN'd copy of an element-buffer slot (caller frees),
 * writing the element count to \a len.
 *
 * Fixes: (a) guard against non-buffer slots in release builds, matching the
 * sibling accessors instead of reading an unrelated union member; (b) avoid a
 * zero-size allocation plus memcpy from a possibly-NULL buffer when the slot
 * is empty -- return NULL with *len = 0 instead. */
void *BMO_slot_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, int *len)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  void **ret;

  /* could add support for mapping type */
  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);

  if ((slot->slot_type != BMO_OP_SLOT_ELEMENT_BUF) || (slot->len == 0)) {
    *len = 0;
    return NULL;
  }

  ret = MEM_mallocN(sizeof(void *) * slot->len, __func__);
  memcpy(ret, slot->data.buf, sizeof(void *) * slot->len);
  *len = slot->len;
  return ret;
}
/* Read a BMO_OP_SLOT_PTR slot; returns NULL on slot-type mismatch. */
void *BMO_slot_ptr_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_PTR);
  if (slot->slot_type != BMO_OP_SLOT_PTR) {
    return NULL;
  }

  return slot->data.p;
}
/* Read a BMO_OP_SLOT_VEC slot into \a r_vec; mismatched slot types are ignored. */
void BMO_slot_vec_get(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, float r_vec[3])
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_VEC);
  if (slot->slot_type != BMO_OP_SLOT_VEC) {
    return;
  }

  copy_v3_v3(r_vec, slot->data.vec);
}
/*
 * BMO_COUNTFLAG
 *
 * Counts the number of elements of a certain type that have a
 * specific flag enabled (or disabled if test_for_enabled is false).
 *
 */
static int bmo_mesh_flag_count(
        BMesh *bm, const char htype, const short oflag,
        const bool test_for_enabled)
{
  int count_vert = 0, count_edge = 0, count_face = 0;

/* parallelize only for large meshes, and only when more than one element
 * type is requested (ELEM(...) == 0 means htype is not a single type).
 * each section writes its own counter, so no synchronization is needed */
#pragma omp parallel sections if ((bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT) && \
    (ELEM(htype, BM_VERT, BM_EDGE, BM_FACE) == 0))
  {
#pragma omp section
    if (htype & BM_VERT) {
      BMIter iter;
      BMVert *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        if (BMO_vert_flag_test_bool(bm, ele, oflag) == test_for_enabled) {
          count_vert++;
        }
      }
    }

#pragma omp section
    if (htype & BM_EDGE) {
      BMIter iter;
      BMEdge *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        if (BMO_edge_flag_test_bool(bm, ele, oflag) == test_for_enabled) {
          count_edge++;
        }
      }
    }

#pragma omp section
    if (htype & BM_FACE) {
      BMIter iter;
      BMFace *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        if (BMO_face_flag_test_bool(bm, ele, oflag) == test_for_enabled) {
          count_face++;
        }
      }
    }
  }

  return (count_vert + count_edge + count_face);
}
/* Count elements of the given type(s) with the tool flag \a oflag enabled. */
int BMO_mesh_enabled_flag_count(BMesh *bm, const char htype, const short oflag)
{
  return bmo_mesh_flag_count(bm, htype, oflag, true);
}
/* Count elements of the given type(s) with the tool flag \a oflag disabled. */
int BMO_mesh_disabled_flag_count(BMesh *bm, const char htype, const short oflag)
{
  return bmo_mesh_flag_count(bm, htype, oflag, false);
}
/* Clear the tool flag \a oflag on every element of the requested type(s). */
void BMO_mesh_flag_disable_all(BMesh *bm, BMOperator *UNUSED(op), const char htype, const short oflag)
{
/* parallelize only for large meshes and when more than one element type is
 * requested (ELEM(...) == 0 means htype is not a single type) */
#pragma omp parallel sections if ((bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT) && \
    (ELEM(htype, BM_VERT, BM_EDGE, BM_FACE) == 0))
  {
#pragma omp section
    if (htype & BM_VERT) {
      BMIter iter;
      BMVert *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        BMO_vert_flag_disable(bm, ele, oflag);
      }
    }

#pragma omp section
    if (htype & BM_EDGE) {
      BMIter iter;
      BMEdge *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        BMO_edge_flag_disable(bm, ele, oflag);
      }
    }

#pragma omp section
    if (htype & BM_FACE) {
      BMIter iter;
      BMFace *ele;
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        BMO_face_flag_disable(bm, ele, oflag);
      }
    }
  }
}
/* Remap the edit-selection history and active face through the given mapping
 * slots (old element -> new element), dropping history entries whose element
 * was removed or (optionally) is no longer selected. */
void BMO_mesh_selected_remap(
        BMesh *bm,
        BMOpSlot *slot_vert_map,
        BMOpSlot *slot_edge_map,
        BMOpSlot *slot_face_map,
        const bool check_select)
{
  if (bm->selected.first) {
    BMEditSelection *ese, *ese_next;
    BMOpSlot *slot_elem_map;

    /* ese_next is cached up-front because the current link may be freed */
    for (ese = bm->selected.first; ese; ese = ese_next) {
      ese_next = ese->next;

      switch (ese->htype) {
        case BM_VERT: slot_elem_map = slot_vert_map; break;
        case BM_EDGE: slot_elem_map = slot_edge_map; break;
        default:      slot_elem_map = slot_face_map; break;
      }

      ese->ele = BMO_slot_map_elem_get(slot_elem_map, ese->ele);

      /* drop entries with no mapped element, or deselected ones when requested */
      if (UNLIKELY((ese->ele == NULL) ||
                   (check_select && (BM_elem_flag_test(ese->ele, BM_ELEM_SELECT) == false))))
      {
        BLI_remlink(&bm->selected, ese);
        MEM_freeN(ese);
      }
    }
  }

  if (bm->act_face) {
    BMFace *f = BMO_slot_map_elem_get(slot_face_map, bm->act_face);
    if (f) {
      bm->act_face = f;
    }
  }
}
/* Number of elements in an element-buffer slot (0 for non-buffer slots). */
int BMO_slot_buffer_count(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);

  /* check if its actually a buffer */
  return (slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF) ? slot->len : 0;
}
/* Number of key/value pairs stored in a mapping slot. */
int BMO_slot_map_count(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAPPING);

  return BLI_ghash_size(slot->data.ghash);
}
/* inserts a key/value mapping into a mapping slot. note that it copies the
 * value, it doesn't store a reference to it. */
void BMO_slot_map_insert(
        BMOperator *op, BMOpSlot *slot,
        const void *element, const void *data)
{
  (void) op;  /* Ignored in release builds. */

  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAPPING);
  /* debug-only check that the slot actually belongs to this operator */
  BMO_ASSERT_SLOT_IN_OP(slot, op);

  BLI_ghash_insert(slot->data.ghash, (void *)element, (void *)data);
}
#if 0
/* Disabled: old dynamic slot-buffer grow helper, kept for reference only.
 * Uses the pre-slot_args API (op->slots / slot->slottype) and is not
 * compiled. */
void *bmo_slot_buffer_grow(BMesh *bm, BMOperator *op, int slot_code, int totadd)
{
  BMOpSlot *slot = &op->slots[slot_code];
  void *tmp;
  ssize_t allocsize;

  BLI_assert(slot->slottype == BMO_OP_SLOT_ELEMENT_BUF);

  /* check if its actually a buffer */
  if (slot->slottype != BMO_OP_SLOT_ELEMENT_BUF)
    return NULL;

  if (slot->flag & BMOS_DYNAMIC_ARRAY) {
    if (slot->len >= slot->size) {
      slot->size = (slot->size + 1 + totadd) * 2;

      allocsize = BMO_OPSLOT_TYPEINFO[bmo_opdefines[op->type]->slot_types[slot_code].type] * slot->size;

      tmp = slot->data.buf;
      slot->data.buf = MEM_callocN(allocsize, "opslot dynamic array");
      memcpy(slot->data.buf, tmp, allocsize);
      MEM_freeN(tmp);
    }

    slot->len += totadd;
  }
  else {
    slot->flag |= BMOS_DYNAMIC_ARRAY;
    slot->len += totadd;
    slot->size = slot->len + 2;

    allocsize = BMO_OPSLOT_TYPEINFO[bmo_opdefines[op->type]->slot_types[slot_code].type] * slot->len;

    tmp = slot->data.buf;
    slot->data.buf = MEM_callocN(allocsize, "opslot dynamic array");
    memcpy(slot->data.buf, tmp, allocsize);
  }

  return slot->data.buf;
}
#endif
/* Enable the tool flag \a oflag on every key of a mapping slot whose element
 * type matches \a htype. */
void BMO_slot_map_to_flag(
        BMesh *bm, BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  GHashIterator gh_iter;

  BLI_assert(slot->slot_type == BMO_OP_SLOT_MAPPING);

  GHASH_ITER (gh_iter, slot->data.ghash) {
    BMElemF *ele_f = BLI_ghashIterator_getKey(&gh_iter);
    if (ele_f->head.htype & htype) {
      BMO_elem_flag_enable(bm, ele_f, oflag);
    }
  }
}
/* Size an element-buffer slot to hold \a len elements, allocating from the
 * operator's arena.  Returns the (possibly NULL) buffer. */
void *BMO_slot_buffer_alloc(BMOperator *op, BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name, const int len)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

  /* check if its actually a buffer */
  if (slot->slot_type != BMO_OP_SLOT_ELEMENT_BUF) {
    return NULL;
  }

  slot->len = len;
  slot->data.buf = len ? BLI_memarena_alloc(op->arena, BMO_OPSLOT_TYPEINFO[slot->slot_type] * len) : NULL;

  return slot->data.buf;
}
/**
 * \brief BMO_ALL_TO_SLOT
 *
 * Copies all elements of a certain type into an operator slot.
 */
void BMO_slot_buffer_from_all(
        BMesh *bm, BMOperator *op, BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
        const char *slot_name, const char htype)
{
  BMOpSlot *output = BMO_slot_get(slot_args, slot_name);
  int totelement = 0, i = 0;

  BLI_assert(output->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  /* the slot's subtype must accept every requested element type */
  BLI_assert(((output->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);

  /* totals are known up-front, so the buffer is sized exactly once */
  if (htype & BM_VERT) totelement += bm->totvert;
  if (htype & BM_EDGE) totelement += bm->totedge;
  if (htype & BM_FACE) totelement += bm->totface;

  if (totelement) {
    BMIter iter;
    BMHeader *ele;

    BMO_slot_buffer_alloc(op, slot_args, slot_name, totelement);

    /* TODO - collapse these loops into one */

    if (htype & BM_VERT) {
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        output->data.buf[i] = ele;
        i++;
      }
    }

    if (htype & BM_EDGE) {
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        output->data.buf[i] = ele;
        i++;
      }
    }

    if (htype & BM_FACE) {
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        output->data.buf[i] = ele;
        i++;
      }
    }
  }
}
/**
 * \brief BMO_HEADERFLAG_TO_SLOT
 *
 * Copies elements of a certain type, which have a certain header flag
 * enabled/disabled into a slot for an operator.
 */
static void bmo_slot_buffer_from_hflag(
        BMesh *bm, BMOperator *op, BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const char hflag,
        const bool test_for_enabled)
{
  BMOpSlot *output = BMO_slot_get(slot_args, slot_name);
  int totelement = 0, i = 0;
  /* hidden elements are skipped when the operator respects hiding, unless the
   * caller is explicitly matching on the hidden flag itself */
  const bool respecthide = ((op->flag & BMO_FLAG_RESPECT_HIDE) != 0) && ((hflag & BM_ELEM_HIDDEN) == 0);

  BLI_assert(output->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((output->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);
  BLI_assert((output->slot_subtype.elem & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE) == 0);

  /* count first so the buffer can be allocated at its exact size;
   * the fill loops below must use the same filter as the count */
  if (test_for_enabled)
    totelement = BM_mesh_elem_hflag_count_enabled(bm, htype, hflag, respecthide);
  else
    totelement = BM_mesh_elem_hflag_count_disabled(bm, htype, hflag, respecthide);

  if (totelement) {
    BMIter iter;
    BMElem *ele;

    BMO_slot_buffer_alloc(op, slot_args, slot_name, totelement);

    /* TODO - collapse these loops into one */

    if (htype & BM_VERT) {
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        if ((!respecthide || !BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) &&
            BM_elem_flag_test_bool(ele, hflag) == test_for_enabled)
        {
          output->data.buf[i] = ele;
          i++;
        }
      }
    }

    if (htype & BM_EDGE) {
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        if ((!respecthide || !BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) &&
            BM_elem_flag_test_bool(ele, hflag) == test_for_enabled)
        {
          output->data.buf[i] = ele;
          i++;
        }
      }
    }

    if (htype & BM_FACE) {
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        if ((!respecthide || !BM_elem_flag_test(ele, BM_ELEM_HIDDEN)) &&
            BM_elem_flag_test_bool(ele, hflag) == test_for_enabled)
        {
          output->data.buf[i] = ele;
          i++;
        }
      }
    }
  }
  else {
    output->len = 0;
  }
}
/* Fill a buffer slot with elements that have the header flag \a hflag enabled. */
void BMO_slot_buffer_from_enabled_hflag(
        BMesh *bm, BMOperator *op,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const char hflag)
{
  bmo_slot_buffer_from_hflag(bm, op, slot_args, slot_name, htype, hflag, true);
}
/* Fill a buffer slot with elements that have the header flag \a hflag disabled. */
void BMO_slot_buffer_from_disabled_hflag(
        BMesh *bm, BMOperator *op,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const char hflag)
{
  bmo_slot_buffer_from_hflag(bm, op, slot_args, slot_name, htype, hflag, false);
}
/* Store a single element into an IS_SINGLE element-buffer slot. */
void BMO_slot_buffer_from_single(BMOperator *op, BMOpSlot *slot, BMHeader *ele)
{
  BMO_ASSERT_SLOT_IN_OP(slot, op);
  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(slot->slot_subtype.elem & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE);
  BLI_assert(slot->len == 0 || slot->len == 1);

  /* element must be of a type this slot accepts */
  BLI_assert(slot->slot_subtype.elem & ele->htype);

  slot->data.buf = BLI_memarena_alloc(op->arena, sizeof(void *) * 4); /* XXX, why 'x4' ? */
  slot->len = 1;
  *slot->data.buf = ele;
}
/* Fill an element-buffer slot from a caller-provided array of element pointers. */
void BMO_slot_buffer_from_array(BMOperator *op, BMOpSlot *slot, BMHeader **ele_buffer, int ele_buffer_len)
{
  BMO_ASSERT_SLOT_IN_OP(slot, op);
  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(slot->len == 0 || slot->len == ele_buffer_len);

  /* lazily create the slot's buffer, then copy the caller's pointers in */
  if (slot->data.buf == NULL) {
    slot->data.buf = BLI_memarena_alloc(op->arena, sizeof(*slot->data.buf) * ele_buffer_len);
  }

  slot->len = ele_buffer_len;
  memcpy(slot->data.buf, ele_buffer, ele_buffer_len * sizeof(*slot->data.buf));
}
/* Read the element stored in an IS_SINGLE buffer slot (NULL when empty). */
void *BMO_slot_buffer_get_single(BMOpSlot *slot)
{
  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(slot->slot_subtype.elem & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE);
  BLI_assert(slot->len == 0 || slot->len == 1);

  if (slot->len == 0) {
    return NULL;
  }
  return (BMHeader *)slot->data.buf[0];
}
/**
 * Copies the values from another slot to the end of the output slot.
 */
void _bmo_slot_buffer_append(
        BMOpSlot slot_args_dst[BMO_OP_MAX_SLOTS], const char *slot_name_dst,
        BMOpSlot slot_args_src[BMO_OP_MAX_SLOTS], const char *slot_name_src,
        struct MemArena *arena_dst)
{
  BMOpSlot *slot_dst = BMO_slot_get(slot_args_dst, slot_name_dst);
  BMOpSlot *slot_src = BMO_slot_get(slot_args_src, slot_name_src);

  BLI_assert(slot_dst->slot_type == BMO_OP_SLOT_ELEMENT_BUF &&
             slot_src->slot_type == BMO_OP_SLOT_ELEMENT_BUF);

  if (slot_dst->len == 0) {
    /* output slot is empty, copy rather than append */
    _bmo_slot_copy(slot_args_src, slot_name_src,
                   slot_args_dst, slot_name_dst,
                   arena_dst);
  }
  else if (slot_src->len != 0) {
    int elem_size = BMO_OPSLOT_TYPEINFO[slot_dst->slot_type];
    int alloc_size = elem_size * (slot_dst->len + slot_src->len);
    /* allocate new buffer */
    /* arena allocations are never freed individually, the old buffer is
     * simply abandoned to the arena */
    void *buf = BLI_memarena_alloc(arena_dst, alloc_size);

    /* copy slot data */
    memcpy(buf, slot_dst->data.buf, elem_size * slot_dst->len);
    memcpy(((char *)buf) + elem_size * slot_dst->len, slot_src->data.buf, elem_size * slot_src->len);

    slot_dst->data.buf = buf;
    slot_dst->len += slot_src->len;
  }
}
/**
 * \brief BMO_FLAG_TO_SLOT
 *
 * Copies elements of a certain type, which have a certain flag set
 * into an output slot for an operator.
 */
static void bmo_slot_buffer_from_flag(
        BMesh *bm, BMOperator *op,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag,
        const bool test_for_enabled)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  int totelement, i = 0;

  BLI_assert(op->slots_in == slot_args || op->slots_out == slot_args);
  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((slot->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);
  BLI_assert((slot->slot_subtype.elem & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE) == 0);

  /* count first so the buffer is allocated at its exact size;
   * fill loops below must apply the same test */
  if (test_for_enabled)
    totelement = BMO_mesh_enabled_flag_count(bm, htype, oflag);
  else
    totelement = BMO_mesh_disabled_flag_count(bm, htype, oflag);

  if (totelement) {
    BMIter iter;
    BMHeader *ele;
    BMHeader **ele_array;

    BMO_slot_buffer_alloc(op, slot_args, slot_name, totelement);

    ele_array = (BMHeader **)slot->data.buf;

    /* TODO - collapse these loops into one */

    if (htype & BM_VERT) {
      BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
        if (BMO_vert_flag_test_bool(bm, (BMVert *)ele, oflag) == test_for_enabled) {
          ele_array[i] = ele;
          i++;
        }
      }
    }

    if (htype & BM_EDGE) {
      BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
        if (BMO_edge_flag_test_bool(bm, (BMEdge *)ele, oflag) == test_for_enabled) {
          ele_array[i] = ele;
          i++;
        }
      }
    }

    if (htype & BM_FACE) {
      BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
        if (BMO_face_flag_test_bool(bm, (BMFace *)ele, oflag) == test_for_enabled) {
          ele_array[i] = ele;
          i++;
        }
      }
    }
  }
  else {
    slot->len = 0;
  }
}
/* Fill a buffer slot with elements that have the tool flag \a oflag enabled. */
void BMO_slot_buffer_from_enabled_flag(
        BMesh *bm, BMOperator *op,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag)
{
  bmo_slot_buffer_from_flag(bm, op, slot_args, slot_name, htype, oflag, true);
}
/* Fill a buffer slot with elements that have the tool flag \a oflag disabled. */
void BMO_slot_buffer_from_disabled_flag(
        BMesh *bm, BMOperator *op,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag)
{
  bmo_slot_buffer_from_flag(bm, op, slot_args, slot_name, htype, oflag, false);
}
/**
 * \brief BMO_FLAG_BUFFER
 *
 * Header Flags elements in a slots buffer, automatically
 * using the selection API where appropriate.
 */
void BMO_slot_buffer_hflag_enable(
        BMesh *bm,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const char hflag, const bool do_flush)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BMElem **data = (BMElem **)slot->data.buf;
  int i;
  /* selection/hide changes go through the flushing setters so connected
   * elements stay consistent; remaining flag bits are set directly below */
  const bool do_flush_select = (do_flush && (hflag & BM_ELEM_SELECT));
  const bool do_flush_hide = (do_flush && (hflag & BM_ELEM_HIDDEN));

  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((slot->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);
  BLI_assert((slot->slot_subtype.elem & BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE) == 0);

  for (i = 0; i < slot->len; i++, data++) {
    if (!(htype & (*data)->head.htype))
      continue;

    if (do_flush_select) {
      BM_elem_select_set(bm, *data, true);
    }

    if (do_flush_hide) {
      BM_elem_hide_set(bm, *data, false);
    }

    BM_elem_flag_enable(*data, hflag);
  }
}
/**
 * \brief BMO_FLAG_BUFFER
 *
 * Removes flags from elements in a slots buffer, automatically
 * using the selection API where appropriate.
 */
void BMO_slot_buffer_hflag_disable(
        BMesh *bm,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const char hflag, const bool do_flush)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BMElem **data = (BMElem **)slot->data.buf;
  int i;
  const bool do_flush_select = (do_flush && (hflag & BM_ELEM_SELECT));
  const bool do_flush_hide = (do_flush && (hflag & BM_ELEM_HIDDEN));

  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((slot->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);

  for (i = 0; i < slot->len; i++, data++) {
    if (!(htype & (*data)->head.htype))
      continue;

    if (do_flush_select) {
      BM_elem_select_set(bm, *data, false);
    }

    /* NOTE(review): hide_set(false) here matches the enable variant exactly --
     * presumably intentional (un-hide with flushing in both directions), but
     * worth confirming against the selection/hide flush semantics. */
    if (do_flush_hide) {
      BM_elem_hide_set(bm, *data, false);
    }

    BM_elem_flag_disable(*data, hflag);
  }
}
/**
 * \brief BMO_FLAG_BUFFER
 *
 * Flags elements in a slots buffer
 *
 * Fix: read the element array through `slot->data.buf` with an explicit cast,
 * as every sibling (e.g. BMO_slot_buffer_flag_disable) does, instead of the
 * implicit `void *` conversion from `slot->data.p` -- same union storage, but
 * the buffer member is the documented one for element-buffer slots and keeps
 * the pair of functions consistent.
 */
void BMO_slot_buffer_flag_enable(
        BMesh *bm,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BMHeader **data = (BMHeader **)slot->data.buf;
  int i;

  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((slot->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);

  for (i = 0; i < slot->len; i++) {
    if (!(htype & data[i]->htype))
      continue;

    BMO_elem_flag_enable(bm, (BMElemF *)data[i], oflag);
  }
}
/**
 * \brief BMO_FLAG_BUFFER
 *
 * Removes flags from elements in a slots buffer
 */
void BMO_slot_buffer_flag_disable(
        BMesh *bm,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char htype, const short oflag)
{
  BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);
  BMHeader **ele_array = (BMHeader **)slot->data.buf;
  int i;

  BLI_assert(slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF);
  BLI_assert(((slot->slot_subtype.elem & BM_ALL_NOLOOP) & htype) == htype);

  for (i = 0; i < slot->len; i++) {
    /* skip elements whose type is outside the requested mask */
    if (htype & ele_array[i]->htype) {
      BMO_elem_flag_disable(bm, (BMElemF *)ele_array[i], oflag);
    }
  }
}
/**
 * \brief ALLOC/FREE FLAG LAYER
 *
 * Used by operator stack to free/allocate
 * private flag data. This is allocated
 * using a mempool so the allocation/frees
 * should be quite fast.
 *
 * BMESH_TODO:
 * Investigate not freeing flag layers until
 * all operators have been executed. This would
 * save a lot of realloc potentially.
 */
static void bmo_flag_layer_alloc(BMesh *bm)
{
  /* set the index values since we are looping over all data anyway,
   * may save time later on */

  BLI_mempool *voldpool = bm->vtoolflagpool;  /* old flag pool */
  BLI_mempool *eoldpool = bm->etoolflagpool;  /* old flag pool */
  BLI_mempool *foldpool = bm->ftoolflagpool;  /* old flag pool */

  /* store memcpy size for reuse */
  /* size of the OLD layer array -- computed before totflags is bumped */
  const size_t old_totflags_size = (bm->totflags * sizeof(BMFlagLayer));

  bm->totflags++;

  /* fresh pools sized for the enlarged per-element flag arrays; old data is
   * copied over element-by-element below, then the old pools are destroyed */
  bm->vtoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer) * bm->totflags, bm->totvert, 512, BLI_MEMPOOL_NOP);
  bm->etoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer) * bm->totflags, bm->totedge, 512, BLI_MEMPOOL_NOP);
  bm->ftoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer) * bm->totflags, bm->totface, 512, BLI_MEMPOOL_NOP);

#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
  {
#pragma omp section
    {
      BMIter iter;
      BMVert_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->vtoolflagpool;

      /* now go through and memcpy all the flags. Loops don't get a flag layer at this time.. */
      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
        void *oldflags = ele->oflags;
        /* calloc so the newly added layer starts zeroed */
        ele->oflags = BLI_mempool_calloc(newpool);
        memcpy(ele->oflags, oldflags, old_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMEdge_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->etoolflagpool;

      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
        void *oldflags = ele->oflags;
        ele->oflags = BLI_mempool_calloc(newpool);
        memcpy(ele->oflags, oldflags, old_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMFace_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->ftoolflagpool;

      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
        void *oldflags = ele->oflags;
        ele->oflags = BLI_mempool_calloc(newpool);
        memcpy(ele->oflags, oldflags, old_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
  }

  BLI_mempool_destroy(voldpool);
  BLI_mempool_destroy(eoldpool);
  BLI_mempool_destroy(foldpool);

  /* all indices were (re)set inline above */
  bm->elem_index_dirty &= ~(BM_VERT | BM_EDGE | BM_FACE);
}
/* Counterpart of bmo_flag_layer_alloc: shrinks every element's flag array by
 * one layer, migrating the surviving layers into freshly-sized mempools. */
static void bmo_flag_layer_free(BMesh *bm)
{
  /* set the index values since we are looping over all data anyway,
   * may save time later on */

  BLI_mempool *voldpool = bm->vtoolflagpool;
  BLI_mempool *eoldpool = bm->etoolflagpool;
  BLI_mempool *foldpool = bm->ftoolflagpool;

  /* store memcpy size for reuse */
  /* size of the NEW (shrunk) layer array; only this much is copied over */
  const size_t new_totflags_size = ((bm->totflags - 1) * sizeof(BMFlagLayer));

  /* de-increment the totflags first.. */
  bm->totflags--;

  bm->vtoolflagpool = BLI_mempool_create(new_totflags_size, bm->totvert, 512, BLI_MEMPOOL_NOP);
  bm->etoolflagpool = BLI_mempool_create(new_totflags_size, bm->totedge, 512, BLI_MEMPOOL_NOP);
  bm->ftoolflagpool = BLI_mempool_create(new_totflags_size, bm->totface, 512, BLI_MEMPOOL_NOP);

#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
  {
#pragma omp section
    {
      BMIter iter;
      BMVert_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->vtoolflagpool;

      /* now go through and memcpy all the flag */
      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
        void *oldflags = ele->oflags;
        /* plain alloc (not calloc): every byte is overwritten by the memcpy */
        ele->oflags = BLI_mempool_alloc(newpool);
        memcpy(ele->oflags, oldflags, new_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMEdge_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->etoolflagpool;

      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
        void *oldflags = ele->oflags;
        ele->oflags = BLI_mempool_alloc(newpool);
        memcpy(ele->oflags, oldflags, new_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMFace_OFlag *ele;
      int i;

      BLI_mempool *newpool = bm->ftoolflagpool;

      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
        void *oldflags = ele->oflags;
        ele->oflags = BLI_mempool_alloc(newpool);
        memcpy(ele->oflags, oldflags, new_totflags_size);
        BM_elem_index_set(&ele->base, i); /* set_inline */
        BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
      }
    }
  }

  BLI_mempool_destroy(voldpool);
  BLI_mempool_destroy(eoldpool);
  BLI_mempool_destroy(foldpool);

  /* all indices were (re)set inline above */
  bm->elem_index_dirty &= ~(BM_VERT | BM_EDGE | BM_FACE);
}
/* Zeroes only the TOP flag layer of every element (no reallocation),
 * refreshing element indices along the way. */
static void bmo_flag_layer_clear(BMesh *bm)
{
  /* set the index values since we are looping over all data anyway,
   * may save time later on */
  const BMFlagLayer zero_flag = {0};

  /* index of the topmost (current operator's) layer */
  const int totflags_offset = bm->totflags - 1;

#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
  {
    /* now go through and memcpy all the flag */
#pragma omp section
    {
      BMIter iter;
      BMVert_OFlag *ele;
      int i;
      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
        ele->oflags[totflags_offset] = zero_flag;
        BM_elem_index_set(&ele->base, i); /* set_inline */
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMEdge_OFlag *ele;
      int i;
      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
        ele->oflags[totflags_offset] = zero_flag;
        BM_elem_index_set(&ele->base, i); /* set_inline */
      }
    }
#pragma omp section
    {
      BMIter iter;
      BMFace_OFlag *ele;
      int i;
      BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
        ele->oflags[totflags_offset] = zero_flag;
        BM_elem_index_set(&ele->base, i); /* set_inline */
      }
    }
  }

  /* all indices were (re)set inline above */
  bm->elem_index_dirty &= ~(BM_VERT | BM_EDGE | BM_FACE);
}
/**
 * Return the first element stored in an element-buffer slot,
 * or NULL when the slot is not an element buffer or holds no buffer.
 */
void *BMO_slot_buffer_get_first(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name)
{
	BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

	if ((slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF) && slot->data.buf) {
		return *slot->data.buf;
	}
	return NULL;
}
/**
 * \brief New Iterator
 *
 * Initialize \a iter over the slot named \a slot_name and return the first
 * item, or NULL when there is nothing to iterate.
 *
 * \param restrictmask restricts the iteration to certain element types
 * (e.g. combination of BM_VERT, BM_EDGE, BM_FACE), if iterating
 * over an element buffer (not a mapping). */
void *BMO_iter_new(
        BMOIter *iter,
        BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *slot_name,
        const char restrictmask)
{
	BMOpSlot *slot = BMO_slot_get(slot_args, slot_name);

	memset(iter, 0, sizeof(BMOIter));

	iter->slot = slot;
	iter->cur = 0;
	iter->restrictmask = restrictmask;

	if (iter->slot->slot_type == BMO_OP_SLOT_MAPPING) {
		/* mapping slots walk a ghash; restrictmask plays no role here */
		BLI_ghashIterator_init(&iter->giter, slot->data.ghash);
	}
	else if (iter->slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF) {
		/* the mask must cover at least the element types the slot holds */
		BLI_assert(restrictmask & slot->slot_subtype.elem);
	}
	else {
		BLI_assert(0);
	}

	/* advance once so the caller immediately gets the first valid item */
	return BMO_iter_step(iter);
}
/**
 * Advance the iterator and return the next item, or NULL when exhausted.
 *
 * Element-buffer slots: entries whose header type is not in
 * 'iter->restrictmask' are skipped.
 * Mapping slots: the key is returned and 'iter->val' is pointed at the
 * corresponding value slot in the ghash.
 */
void *BMO_iter_step(BMOIter *iter)
{
	BMOpSlot *slot = iter->slot;
	if (slot->slot_type == BMO_OP_SLOT_ELEMENT_BUF) {
		BMHeader *ele;

		if (iter->cur >= slot->len) {
			return NULL;
		}

		ele = slot->data.buf[iter->cur++];
		/* skip over elements filtered out by the restrict mask */
		while (!(iter->restrictmask & ele->htype)) {
			if (iter->cur >= slot->len) {
				return NULL;
			}

			ele = slot->data.buf[iter->cur++];
			BLI_assert((ele == NULL) || (slot->slot_subtype.elem & ele->htype));
		}

		BLI_assert((ele == NULL) || (slot->slot_subtype.elem & ele->htype));

		return ele;
	}
	else if (slot->slot_type == BMO_OP_SLOT_MAPPING) {
		void *ret;
		if (BLI_ghashIterator_done(&iter->giter) == false) {
			/* fetch key + value pointer, then step for next call */
			ret = BLI_ghashIterator_getKey(&iter->giter);
			iter->val = BLI_ghashIterator_getValue_p(&iter->giter);

			BLI_ghashIterator_step(&iter->giter);
		}
		else {
			ret = NULL;
			iter->val = NULL;
		}

		return ret;
	}
	else {
		BLI_assert(0);
	}

	return NULL;
}
/* used for iterating over mappings */

/**
 * Returns a pointer to the key-value when iterating over mappings.
 * remember for pointer maps this will be a pointer to a pointer.
 *
 * Only meaningful while iterating a mapping slot; 'iter->val' is set by
 * #BMO_iter_step (NULL once the iterator is exhausted).
 */
void **BMO_iter_map_value_p(BMOIter *iter)
{
	return iter->val;
}
/**
 * Return the current mapped value as a pointer.
 * Only valid for element/internal pointer maps (asserted below).
 */
void *BMO_iter_map_value_ptr(BMOIter *iter)
{
	BLI_assert(ELEM(iter->slot->slot_subtype.map,
	                BMO_OP_SLOT_SUBTYPE_MAP_ELEM, BMO_OP_SLOT_SUBTYPE_MAP_INTERNAL));
	return iter->val ? *iter->val : NULL;
}
/**
 * Return the current mapped value as a float (float maps only).
 */
float BMO_iter_map_value_float(BMOIter *iter)
{
	BLI_assert(iter->slot->slot_subtype.map == BMO_OP_SLOT_SUBTYPE_MAP_FLT);
	return **((float **)iter->val);
}
/**
 * Return the current mapped value as an int (int maps only).
 */
int BMO_iter_map_value_int(BMOIter *iter)
{
	BLI_assert(iter->slot->slot_subtype.map == BMO_OP_SLOT_SUBTYPE_MAP_INT);
	return **((int **)iter->val);
}
/**
 * Return the current mapped value as a bool (bool maps only).
 */
bool BMO_iter_map_value_bool(BMOIter *iter)
{
	BLI_assert(iter->slot->slot_subtype.map == BMO_OP_SLOT_SUBTYPE_MAP_BOOL);
	return **((bool **)iter->val);
}
/* error system */

/**
 * One entry on the bmesh error stack ('bm->errorstack').
 */
typedef struct BMOpError {
	struct BMOpError *next, *prev;  /* list links (stack lives in a ListBase) */
	int errorcode;                  /* code passed to BMO_error_raise */
	BMOperator *op;                 /* operator that raised the error */
	const char *msg;                /* message; defaulted from bmo_error_messages when raised with NULL */
} BMOpError;
/**
 * Pop (and free) errors off the bmesh error stack until none remain.
 * NOTE: popping stops when BMO_error_pop() returns 0, i.e. an empty stack.
 */
void BMO_error_clear(BMesh *bm)
{
	while (BMO_error_pop(bm, NULL, NULL));
}
/**
 * Push a new error onto the bmesh error stack.
 *
 * \param owner operator that raised the error (stored for later query).
 * \param errcode error code; also indexes bmo_error_messages when \a msg is NULL.
 * \param msg custom message, or NULL to use the default message for \a errcode.
 */
void BMO_error_raise(BMesh *bm, BMOperator *owner, int errcode, const char *msg)
{
	BMOpError *err = MEM_callocN(sizeof(BMOpError), "bmop_error");

	err->errorcode = errcode;
	if (!msg) {
		/* fall back to the canned message for this code */
		msg = bmo_error_messages[errcode];
	}
	err->msg = msg;
	err->op = owner;

	/* newest error goes to the head, so get/pop see it first */
	BLI_addhead(&bm->errorstack, err);
}
/**
 * Check whether any operator error is currently on the error stack.
 */
bool BMO_error_occurred(BMesh *bm)
{
	return !BLI_listbase_is_empty(&bm->errorstack);
}
/* returns error code or 0 if no error */

/**
 * Peek at the most recent error without removing it from the stack.
 *
 * \param msg if non-NULL, receives the error message.
 * \param op if non-NULL, receives the operator that raised the error.
 * \return the error code, or 0 when the stack is empty.
 */
int BMO_error_get(BMesh *bm, const char **msg, BMOperator **op)
{
	BMOpError *err = bm->errorstack.first;
	if (!err) {
		return 0;
	}

	if (msg) *msg = err->msg;
	if (op) *op = err->op;

	return err->errorcode;
}
/**
 * Remove the most recent error from the stack and free it.
 * Output parameters behave as in #BMO_error_get.
 * \return the popped error code, or 0 when the stack was empty.
 */
int BMO_error_pop(BMesh *bm, const char **msg, BMOperator **op)
{
	int errorcode = BMO_error_get(bm, msg, op);

	if (errorcode) {
		BMOpError *err = bm->errorstack.first;
		BLI_remlink(&bm->errorstack, bm->errorstack.first);
		MEM_freeN(err);
	}

	return errorcode;
}
/* peek at the character after fmt[0] without reading past the terminator */
#define NEXT_CHAR(fmt) ((fmt)[0] != 0 ? (fmt)[1] : 0)
/**
 * Find the index of the slot named \a identifier in a NULL-name-terminated
 * slot array; returns -1 when no slot matches.
 */
static int bmo_name_to_slotcode(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier)
{
	int slot_code;

	for (slot_code = 0; slot_args->slot_name; slot_code++, slot_args++) {
		if (STREQLEN(identifier, slot_args->slot_name, MAX_SLOTNAME)) {
			return slot_code;
		}
	}

	return -1;
}
/**
 * Same as bmo_name_to_slotcode(), but warns on stderr when the slot name
 * cannot be found (returns -1 in that case).
 */
static int bmo_name_to_slotcode_check(BMOpSlot slot_args[BMO_OP_MAX_SLOTS], const char *identifier)
{
	int i = bmo_name_to_slotcode(slot_args, identifier);
	if (i < 0) {
		fprintf(stderr, "%s: ! could not find bmesh slot for name %s! (bmesh internal error)\n", __func__, identifier);
	}

	return i;
}
/**
 * Look up an operator definition by name in the global bmo_opdefines table.
 * \return the opcode index, or -1 when no operator matches.
 */
int BMO_opcode_from_opname(const char *opname)
{
	uint index;

	for (index = 0; index < bmo_opdefines_total; index++) {
		if (STREQ(bmo_opdefines[index]->opname, opname)) {
			return index;
		}
	}

	return -1;
}
/**
 * Wrapper for BMO_opcode_from_opname() that warns on stderr when the
 * operator name is unknown (returns -1 in that case).
 */
static int BMO_opcode_from_opname_check(const char *opname)
{
	const int opcode = BMO_opcode_from_opname(opname);

	if (opcode == -1) {
		fprintf(stderr, "%s: could not find bmesh slot for name %s! (bmesh internal error)\n", __func__, opname);
	}
	return opcode;
}
/**
* \brief Format Strings for #BMOperator Initialization.
*
* This system is used to execute or initialize an operator,
* using a formatted-string system.
*
* The basic format for the format string is:
* `[operatorname] [slot_name]=%[code] [slot_name]=%[code]`
*
* Example:
*
* \code{.c}
* BMO_op_callf(bm, BMO_FLAG_DEFAULTS,
* "delete context=%i geom=%hv",
* DEL_ONLYFACES, BM_ELEM_SELECT);
* \endcode
*
*
* **Primitive Types**
* - `b` - boolean (same as int but 1/0 only). #BMO_OP_SLOT_BOOL
* - `i` - int. #BMO_OP_SLOT_INT
* - `f` - float. #BMO_OP_SLOT_FLT
* - `p` - pointer (normally to a Scene/Mesh/Object/BMesh). #BMO_OP_SLOT_PTR
* - `m3` - 3x3 matrix of floats. #BMO_OP_SLOT_MAT
* - `m4` - 4x4 matrix of floats. #BMO_OP_SLOT_MAT
* - `v` - 3D vector of floats. #BMO_OP_SLOT_VEC
*
*
* **Utility**
*
* Pass an existing slot which is copied to either an input or output slot.
* Taking the operator and slot-name pair of args (BMOperator *, const char *).
* - `s` - slot_in (lower case)
* - `S` - slot_out (upper case)
*
*
* **Element Buffer** (#BMO_OP_SLOT_ELEMENT_BUF)
* - `e` - single element vert/edge/face (use with #BMO_OP_SLOT_SUBTYPE_ELEM_IS_SINGLE).
* - `eb` - elem buffer, take an array and a length.
* - `av` - all verts
* - `ae` - all edges
* - `af` - all faces
* - `hv` - header flagged verts (hflag)
* - `he` - header flagged edges (hflag)
* - `hf` - header flagged faces (hflag)
* - `Hv` - header flagged verts (hflag off)
* - `He` - header flagged edges (hflag off)
* - `Hf` - header flagged faces (hflag off)
* - `fv` - flagged verts (oflag)
* - `fe` - flagged edges (oflag)
* - `ff` - flagged faces (oflag)
* - `Fv` - flagged verts (oflag off)
* - `Fe` - flagged edges (oflag off)
* - `Ff` - flagged faces (oflag off)
*
* \note The common v/e/f suffix can be mixed,
 * so `avef` can be used for all verts, edges and faces.
* Order is not important so `Hfev` is also valid (all unflagged verts, edges and faces).
*/
/**
 * Initialize an operator from a formatted string (see the format
 * documentation above).  On success the operator is initialized and true is
 * returned; on a malformed format string an explanatory message is printed,
 * the partially initialized operator is finished, and false is returned.
 *
 * FIX: the trailing-whitespace check previously tested 'fmt[i]' *after*
 * 'fmt' had already been advanced by 'i', reading up to 'i' bytes past the
 * string terminator for format strings ending in spaces (and sending them
 * into the error path instead of a clean break).  It now tests 'fmt[0]'.
 */
bool BMO_op_vinitf(BMesh *bm, BMOperator *op, const int flag, const char *_fmt, va_list vlist)
{
	// BMOpDefine *def;
	char *opname, *ofmt, *fmt;
	char slot_name[64] = {0};
	int i, type;
	bool noslot, state;

	/* basic useful info to help find where bmop formatting strings fail */
	const char *err_reason = "Unknown";
	int lineno = -1;

#define GOTO_ERROR(reason) \
	{ \
		err_reason = reason; \
		lineno = __LINE__; \
		goto error; \
	} (void)0

	/* we muck around in here, so dup it */
	fmt = ofmt = BLI_strdup(_fmt);

	/* find operator name: it runs up to the first space (or string end) */
	i = strcspn(fmt, " ");

	opname = fmt;
	noslot = (opname[i] == '\0');
	opname[i] = '\0';

	fmt += i + (noslot ? 0 : 1);

	i = BMO_opcode_from_opname_check(opname);

	if (i == -1) {
		MEM_freeN(ofmt);
		BLI_assert(0);
		return false;
	}

	BMO_op_init(bm, op, flag, opname);
	// def = bmo_opdefines[i];

	i = 0;

	state = true; /* false: not inside slot_code name, true: inside slot_code name */

	while (*fmt) {
		if (state) {
			/* jump past leading whitespace */
			i = strspn(fmt, " ");
			fmt += i;

			/* ignore trailing whitespace
			 * (was 'fmt[i]': out-of-bounds read after the advance above) */
			if (!fmt[0])
				break;

			/* find end of slot name, only "slot=%f", can be used */
			i = strcspn(fmt, "=");
			if (!fmt[i]) {
				GOTO_ERROR("could not match end of slot name");
			}

			/* terminate the slot name in place so it can be looked up */
			fmt[i] = 0;

			if (bmo_name_to_slotcode_check(op->slots_in, fmt) < 0) {
				GOTO_ERROR("name to slot code check failed");
			}

			BLI_strncpy(slot_name, fmt, sizeof(slot_name));

			state = false;
			fmt += i;
			/* the 'fmt++' at the loop bottom steps past the '=' we zeroed */
		}
		else {
			switch (*fmt) {
				case ' ':
				case '=':
				case '%':
					break;
				case 'm':
				{
					/* 3x3 or 4x4 float matrix */
					int size;
					const char c = NEXT_CHAR(fmt);
					fmt++;

					if      (c == '3') size = 3;
					else if (c == '4') size = 4;
					else GOTO_ERROR("matrix size was not 3 or 4");

					BMO_slot_mat_set(op, op->slots_in, slot_name, va_arg(vlist, void *), size);
					state = true;
					break;
				}
				case 'v':
				{
					/* 3D float vector */
					BMO_slot_vec_set(op->slots_in, slot_name, va_arg(vlist, float *));
					state = true;
					break;
				}
				case 'e':
				{
					/* single element, or 'eb': element buffer with length */
					BMOpSlot *slot = BMO_slot_get(op->slots_in, slot_name);

					if (NEXT_CHAR(fmt) == 'b') {
						BMHeader **ele_buffer = va_arg(vlist, void *);
						int ele_buffer_len = va_arg(vlist, int);
						BMO_slot_buffer_from_array(op, slot, ele_buffer, ele_buffer_len);
						fmt++;
					}
					else {
						/* single vert/edge/face */
						BMHeader *ele = va_arg(vlist, void *);
						BMO_slot_buffer_from_single(op, slot, ele);
					}

					state = true;
					break;
				}
				case 's':
				case 'S':
				{
					/* copy a slot from another operator: 's' input, 'S' output */
					BMOperator *op_other = va_arg(vlist, void *);
					const char *slot_name_other = va_arg(vlist, char *);

					if (*fmt == 's') {
						BLI_assert(bmo_name_to_slotcode_check(op_other->slots_in, slot_name_other) != -1);
						BMO_slot_copy(op_other, slots_in, slot_name_other,
						              op, slots_in, slot_name);
					}
					else {
						BLI_assert(bmo_name_to_slotcode_check(op_other->slots_out, slot_name_other) != -1);
						BMO_slot_copy(op_other, slots_out, slot_name_other,
						              op, slots_in, slot_name);
					}
					state = true;
					break;
				}
				case 'i':
					BMO_slot_int_set(op->slots_in, slot_name, va_arg(vlist, int));
					state = true;
					break;
				case 'b':
					BMO_slot_bool_set(op->slots_in, slot_name, va_arg(vlist, int));
					state = true;
					break;
				case 'p':
					BMO_slot_ptr_set(op->slots_in, slot_name, va_arg(vlist, void *));
					state = true;
					break;
				case 'f':
				case 'F':
				case 'h':
				case 'H':
				case 'a':
					type = *fmt;

					/* a bare 'f' (no htype chars following) is a float slot */
					if (NEXT_CHAR(fmt) == ' ' || NEXT_CHAR(fmt) == '\0') {
						BMO_slot_float_set(op->slots_in, slot_name, va_arg(vlist, double));
					}
					else {
						/* collect the v/e/f suffix into an htype mask */
						char htype = 0;

						while (1) {
							char htype_set;
							const char c = NEXT_CHAR(fmt);
							if      (c == 'f') htype_set = BM_FACE;
							else if (c == 'e') htype_set = BM_EDGE;
							else if (c == 'v') htype_set = BM_VERT;
							else {
								break;
							}

							if (UNLIKELY(htype & htype_set)) {
								GOTO_ERROR("htype duplicated");
							}

							htype |= htype_set;
							fmt++;
						}

						/* fill the buffer slot from header flags ('h'/'H'),
						 * operator flags ('f'/'F') or all elements ('a') */
						if (type == 'h') {
							BMO_slot_buffer_from_enabled_hflag(bm, op, op->slots_in, slot_name, htype, va_arg(vlist, int));
						}
						else if (type == 'H') {
							BMO_slot_buffer_from_disabled_hflag(bm, op, op->slots_in, slot_name, htype, va_arg(vlist, int));
						}
						else if (type == 'a') {
							BMO_slot_buffer_from_all(bm, op, op->slots_in, slot_name, htype);
						}
						else if (type == 'f') {
							BMO_slot_buffer_from_enabled_flag(bm, op, op->slots_in, slot_name, htype, va_arg(vlist, int));
						}
						else if (type == 'F') {
							BMO_slot_buffer_from_disabled_flag(bm, op, op->slots_in, slot_name, htype, va_arg(vlist, int));
						}
					}

					state = true;
					break;
				default:
					fprintf(stderr,
					        "%s: unrecognized bmop format char: '%c', %d in '%s'\n",
					        __func__, *fmt, (int)(fmt - ofmt), ofmt);
					break;
			}
		}
		fmt++;
	}

	MEM_freeN(ofmt);
	return true;

error:
	/* non urgent todo - explain exactly what is failing */
	fprintf(stderr, "%s: error parsing formatting string\n", __func__);

	fprintf(stderr, "string: '%s', position %d\n", _fmt, (int)(fmt - ofmt));
	fprintf(stderr, "        ");
	{
		int pos = (int)(fmt - ofmt);
		for (i = 0; i < pos; i++) {
			fprintf(stderr, " ");
		}
		fprintf(stderr, "^\n");
	}

	fprintf(stderr, "source code:  %s:%d\n", __FILE__, lineno);

	fprintf(stderr, "reason: %s\n", err_reason);

	MEM_freeN(ofmt);

	BMO_op_finish(bm, op);
	return false;

#undef GOTO_ERROR

}
/**
 * Variadic convenience wrapper over BMO_op_vinitf(): initialize an operator
 * from a format string.  Prints a notice and returns false on failure.
 */
bool BMO_op_initf(BMesh *bm, BMOperator *op, const int flag, const char *fmt, ...)
{
	va_list list;
	bool ok;

	va_start(list, fmt);
	ok = BMO_op_vinitf(bm, op, flag, fmt, list);
	if (!ok) {
		printf("%s: failed\n", __func__);
	}
	va_end(list);
	return ok;
}
/**
 * Initialize, execute and finish an operator in one call, using the
 * format-string interface.  Returns false (with a notice) when the format
 * string fails to parse; otherwise the operator is run to completion.
 */
bool BMO_op_callf(BMesh *bm, const int flag, const char *fmt, ...)
{
	va_list list;
	BMOperator op;
	bool ok;

	va_start(list, fmt);
	ok = BMO_op_vinitf(bm, &op, flag, fmt, list);
	if (ok) {
		BMO_op_exec(bm, &op);
		BMO_op_finish(bm, &op);
	}
	else {
		printf("%s: failed, format is:\n    \"%s\"\n", __func__, fmt);
	}
	va_end(list);
	return ok;
}
|
shear.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS H H EEEEE AAA RRRR %
% SS H H E A A R R %
% SSS HHHHH EEE AAAAA RRRR %
% SS H H E A A R R %
% SSSSS H H EEEEE A A R R %
% %
% %
% MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The XShearImage() and YShearImage() methods are based on the paper "A Fast
% Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics
% Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar
% method based on the Paeth paper written by Michael Halle of the Spatial
% Imaging Group, MIT Media Lab.
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob-private.h"
#include "magick/cache-private.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/memory_.h"
#include "magick/list.h"
#include "magick/matrix.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/shear.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/token.h"
#include "magick/transform.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C r o p T o F i t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropToFitImage() crops the sheared image as determined by the bounding box
% as defined by width and height and shearing angles.
%
% The format of the CropToFitImage method is:
%
% MagickBooleanType CropToFitImage(Image **image,
%      const MagickRealType x_shear,const MagickRealType y_shear,
% const MagickRealType width,const MagickRealType height,
% const MagickBooleanType rotate,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CropToFitImage(): crop the sheared/rotated canvas down to the bounding box
  of the transformed original image.  The four corners of the width x height
  region are run through the same shear sequence the pixels went through,
  their bounding box is taken, and the image is cropped to it (with the page
  offset preserved across the crop).  Returns MagickFalse when CropImage()
  fails; on success *image is replaced with the cropped image.
*/
static MagickBooleanType CropToFitImage(Image **image,
  const MagickRealType x_shear,const MagickRealType y_shear,
  const MagickRealType width,const MagickRealType height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  ssize_t
    i;

  /*
    Calculate the rotated image size.
  */
  /* the four corners of the original region, centered on the origin */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  for (i=0; i < 4; i++)
  {
    /* apply the shears in order: x-shear, then y-shear on the sheared x,
       then (for the rotate case) a second x-shear — mirroring the
       three-shear rotation applied to the pixels */
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    /* translate back into the coordinate frame of the enlarged canvas */
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /* axis-aligned bounding box of the transformed corners */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=CastDoubleToLong(ceil(min.x-0.5));
  geometry.y=CastDoubleToLong(ceil(min.y-0.5));
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  /* reset the page while cropping so CropImage works in canvas coordinates,
     then restore it on the result */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s k e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeskewImage() removes skew from the image. Skew is an artifact that
% occurs in scanned images because of the camera being misaligned,
% imperfections in the scanning or surface, or simply because the paper was
% not placed completely flat when scanned.
%
% The amount of rotation calculated to deskew the image is saved in the
% artifact "deskew:angle".
%
% If the artifact "deskew:auto-crop" is given the image will be automatically
% cropped of the excess background.
%
% The format of the DeskewImage method is:
%
% Image *DeskewImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: separate background from foreground.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  RadonProjection(): accumulate projections of the packed bitmap in
  'source_matrix' into 'projection'.  Works in two stages:

  1. A butterfly pass over column pairs ('step' doubling each iteration,
     ping-ponging between the two matrices) combines each column with a
     progressively-shifted neighbor, so column x of the final matrix holds
     the bitmap summed along a discrete direction.

  2. For every final column, the sum of squared differences between
     vertically adjacent entries is stored in
     projection[columns + sign*x - 1]; with sign = -1 this fills the lower
     half of the projection array, with sign = +1 the upper half.

  'image' is only used to size the OpenMP thread count.  Matrix element
  reads/writes that fail are simply skipped (best effort).
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrix,
  MatrixInfo *destination_matrix,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  MatrixInfo
    *p,
    *q;

  ssize_t
    x;

  size_t
    step;

  p=source_matrix;
  q=destination_matrix;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        /* rows where both the i-shifted and (i+1)-shifted neighbor exist */
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* rows where only the i-shifted neighbor is in range */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* remaining rows: copy the element straight through */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /* ping-pong: output of this pass is input to the next */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    ssize_t
      y;

    size_t
      sum;

    sum=0;
    /* sum of squared vertical differences — peaks when text lines align */
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}
/*
  RadonTransform(): threshold the image into a packed 1-bit-per-pixel bitmap
  (stored as per-byte popcounts in a matrix of unsigned shorts) and compute
  Radon-style projections for both scan directions via RadonProjection().
  The caller supplies 'projection' with room for 2*width-1 entries, where
  width is the smallest power of two >= ceil(columns/8).
  Returns MagickFalse on allocation/reset failure; note that per-row pixel
  read failures only clear 'status' locally and do not affect the return
  value (best-effort behavior).
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrix,
    *source_matrix;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    count,
    width;

  ssize_t
    y;

  unsigned char
    byte;

  unsigned short
    bits[256];

  /* width = smallest power of two that holds ceil(columns/8) packed bytes */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrix=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  if ((source_matrix == (MatrixInfo *) NULL) ||
      (destination_matrix == (MatrixInfo *) NULL))
    {
      if (destination_matrix != (MatrixInfo *) NULL)
        destination_matrix=DestroyMatrixInfo(destination_matrix);
      if (source_matrix != (MatrixInfo *) NULL)
        source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrix) == MagickFalse)
    {
      destination_matrix=DestroyMatrixInfo(destination_matrix);
      source_matrix=DestroyMatrixInfo(source_matrix);
      return(MagickFalse);
    }
  /* precompute a popcount table: bits[i] = number of set bits in byte i */
  for (i=0; i < 256; i++)
  {
    byte=(unsigned char) i;
    for (count=0; byte != 0; byte>>=1)
      count+=byte & 0x01;
    bits[i]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* first pass: pack the threshold bitmap right-to-left (matrix column
     index decreasing), then project with sign=-1 */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /* a pixel counts as foreground when any channel is below threshold */
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,--i,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        /* flush the final partial byte, left-aligned */
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,-1,projection);
  /* reset and repack for the opposite direction */
  (void) NullMatrix(source_matrix);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* second pass: pack left-to-right, then project with sign=+1 */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(p) < threshold) ||
          ((MagickRealType) GetPixelGreen(p) < threshold) ||
          ((MagickRealType) GetPixelBlue(p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrix,i++,y,&value);
          bit=0;
          byte=0;
        }
      p++;
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrix,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrix,destination_matrix,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrix=DestroyMatrixInfo(destination_matrix);
  source_matrix=DestroyMatrixInfo(source_matrix);
  return(MagickTrue);
}
/*
  GetImageBackgroundColor(): set image->background_color to the average
  color of the border pixels, i.e. those within 'offset' pixels of any image
  edge.  A non-positive offset is a no-op.

  NOTE(review): 'count' is only zero if every border row fails to read from
  the pixel cache; in that case the divisions below would be by zero —
  confirm this is acceptable to callers (DeskewImage passes offset > 0 on a
  freshly cloned image).
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickPixelPacket
    background;

  MagickRealType
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetMagickPixelPacket(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *magick_restrict p;

    ssize_t
      x;

    /* skip rows entirely inside the interior */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* skip interior columns; only border pixels contribute */
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(p);
      background.green+=QuantumScale*GetPixelGreen(p);
      background.blue+=QuantumScale*GetPixelBlue(p);
      background.opacity+=QuantumScale*GetPixelOpacity(p);
      count++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* rescale the [0,1] accumulators back to quantum range */
  image->background_color.red=ClampToQuantum((MagickRealType) QuantumRange*
    background.red/count);
  image->background_color.green=ClampToQuantum((MagickRealType) QuantumRange*
    background.green/count);
  image->background_color.blue=ClampToQuantum((MagickRealType) QuantumRange*
    background.blue/count);
  image->background_color.opacity=ClampToQuantum((MagickRealType) QuantumRange*
    background.opacity/count);
}
/*
  DeskewImage(): estimate the skew angle of the image from its Radon
  projections, rotate by the negated angle via an affine transform, and
  (when the "deskew:auto-crop" artifact is set) crop the result to its
  content bounding box.  The computed angle is stored in the
  "deskew:angle" artifact of the intermediate clone.
  Returns NULL on failure.
*/
MagickExport Image *DeskewImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
  AffineMatrix
    affine_matrix;

  const char
    *artifact;

  double
    degrees;

  Image
    *clone_image,
    *crop_image,
    *deskew_image,
    *median_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  ssize_t
    i;

  size_t
    max_projection,
    *projection,
    width;

  ssize_t
    skew;

  /*
    Compute deskew angle.
  */
  /* projection array spans both scan directions: 2*width-1 bins */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1),
    sizeof(*projection));
  if (projection == (size_t *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  status=RadonTransform(image,threshold,projection,exception);
  if (status == MagickFalse)
    {
      projection=(size_t *) RelinquishMagickMemory(projection);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* the bin with the strongest projection response gives the skew offset */
  max_projection=0;
  skew=0;
  for (i=0; i < (ssize_t) (2*width-1); i++)
  {
    if (projection[i] > max_projection)
      {
        skew=i-(ssize_t) width+1;
        max_projection=projection[i];
      }
  }
  projection=(size_t *) RelinquishMagickMemory(projection);
  /* convert the discrete skew (per 8-pixel byte column) into degrees */
  degrees=RadiansToDegrees(-atan((double) skew/width/8));
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  Deskew angle: %g",degrees);
  /*
    Deskew image.
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MaxTextExtent];

    /* record the detected angle so callers can inspect it */
    (void) FormatLocaleString(angle,MaxTextExtent,"%g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod);
  /* pure rotation matrix for -skew degrees */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsMagickTrue(artifact) == MagickFalse)
    {
      /* no auto-crop requested: return the rotated image as-is */
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image.
  */
  /* sample the border (artifact value = border width) for the fill color */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  /* median filter suppresses speckle before taking the bounding box */
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"  Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e g r a l R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IntegralRotateImage() rotates the image an integral of 90 degrees. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the rotated image.
%
% The format of the IntegralRotateImage method is:
%
% Image *IntegralRotateImage(const Image *image,size_t rotations,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"
CacheView
*image_view,
*rotate_view;
Image
*rotate_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
/*
Initialize rotated image attributes.
*/
assert(image != (Image *) NULL);
page=image->page;
rotations%=4;
rotate_image=(Image *) NULL;
rotate_view=(CacheView *) NULL;
switch (rotations)
{
case 0:
default:
{
rotate_image=CloneImage(image,0,0,MagickTrue,exception);
break;
}
case 2:
{
rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
break;
}
case 1:
case 3:
{
rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
break;
}
}
if (rotate_image == (Image *) NULL)
return((Image *) NULL);
if (rotations == 0)
return(rotate_image);
/*
Integral rotate the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
switch (rotations)
{
case 1:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 90 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict rotate_indexes;
PixelPacket
*magick_restrict q;
ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_width+tile_x) > image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_height+tile_y) > image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
const PixelPacket
*magick_restrict tile_pixels;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
(rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels-=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
const IndexPacket
*magick_restrict tile_indexes;
tile_indexes=indexes+(height-1)*width+y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes-=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
break;
}
case 2:
{
/*
Rotate 180 degrees.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict rotate_indexes;
PixelPacket
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
1),image->columns,1,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
q+=image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
*--q=(*p++);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
for (x=0; x < (ssize_t) image->columns; x++)
SetPixelIndex(rotate_indexes+image->columns-x-1,
GetPixelIndex(indexes+x));
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,RotateImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (page.width != 0)
page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
case 3:
{
size_t
tile_height,
tile_width;
ssize_t
tile_y;
/*
Rotate 270 degrees.
*/
GetPixelCacheTileSize(image,&tile_width,&tile_height);
tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) tile_height)
{
ssize_t
tile_x;
if (status == MagickFalse)
continue;
for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) tile_width)
{
MagickBooleanType
sync;
const IndexPacket
*magick_restrict indexes;
const PixelPacket
*magick_restrict p;
IndexPacket
*magick_restrict rotate_indexes;
PixelPacket
*magick_restrict q;
ssize_t
y;
size_t
height,
width;
width=tile_width;
if ((tile_x+tile_width) > image->columns)
width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
height=tile_height;
if ((tile_y+tile_height) > image->rows)
height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (y=0; y < (ssize_t) width; y++)
{
const PixelPacket
*magick_restrict tile_pixels;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
rotate_image->rows-(tile_x+width)),height,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
tile_pixels=p+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*q++=(*tile_pixels);
tile_pixels+=width;
}
rotate_indexes=GetCacheViewAuthenticIndexQueue(rotate_view);
if ((indexes != (IndexPacket *) NULL) &&
(rotate_indexes != (IndexPacket *) NULL))
{
const IndexPacket
*magick_restrict tile_indexes;
tile_indexes=indexes+(width-1)-y;
for (x=0; x < (ssize_t) height; x++)
{
*rotate_indexes++=(*tile_indexes);
tile_indexes+=width;
}
}
sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
(void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
image->rows-1,image->rows);
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.height != 0)
page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
break;
}
default:
break;
}
rotate_view=DestroyCacheView(rotate_view);
image_view=DestroyCacheView(image_view);
rotate_image->type=image->type;
rotate_image->page=page;
if (status == MagickFalse)
rotate_image=DestroyImage(rotate_image);
return(rotate_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ X S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% XShearImage() shears the image in the X direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a vertical
% Y-axis. X shears will widen an image creating 'empty' triangles on the left
% and right sides of the source image.
%
% The format of the XShearImage method is:
%
% MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the X
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag  "XShear/Image"

  /*
    Shear a width x height region of 'image' (anchored at x_offset,y_offset)
    along the X axis, in place.  Despite the name, 'degrees' is the shear
    factor (presumably tan(angle), as computed by ShearImage /
    ShearRotateImage — confirm against callers): row y is displaced
    horizontally by degrees*(y-height/2).  Vacated pixels are filled with the
    image background color, blended by the fractional coverage 'area'.
    Returns MagickTrue on success, MagickFalse if any pixel-cache access
    fails or the progress monitor cancels.
  */
  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Capture the background color; convert if the image is CMYK.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    X shear image.  Rows are independent, so they are distributed across
    OpenMP threads; 'status' records any failure, 'progress' drives the
    progress monitor.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    IndexPacket
      *magick_restrict indexes,
      *magick_restrict shear_indexes;

    PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=x_offset;
    indexes+=x_offset;
    /*
      Rows above the midline shift one way, rows below the other;
      'displacement' becomes the (non-negative) shift for this row.
    */
    displacement=degrees*(MagickRealType) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /*
      Split the displacement into a whole-pixel shift 'step' (rounded up by
      the step++) and a fractional remainder 'area' used to area-blend each
      pixel with its neighbor.
    */
    step=CastDoubleToLong(floor((double) displacement));
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;  /* shift would run off the left of the region */
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              /* destination still left of the region: just advance */
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          /* blend previous pixel ('pixel') with the current source by
             fractional coverage 'area', then slide the window */
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /* fill the vacated right edge with background */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width;
        indexes+=width;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (ssize_t) width; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;  /* destination is right of the image: skip */
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        /* fill the vacated left edge with background */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Y S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% YShearImage shears the image in the Y direction with a shear angle of
% 'degrees'. Positive angles shear counter-clockwise (right-hand rule), and
% negative angles shear clockwise. Angles are measured relative to a
% horizontal X-axis. Y shears will increase the height of an image creating
% 'empty' triangles on the top and bottom of the source image.
%
% The format of the YShearImage method is:
%
% MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
% const size_t width,const size_t height,
% const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: A MagickRealType representing the shearing angle along the Y
% axis.
%
% o width, height, x_offset, y_offset: Defines a region of the image
% to shear.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const MagickRealType degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag  "YShear/Image"

  /*
    Shear a width x height region of 'image' (anchored at x_offset,y_offset)
    along the Y axis, in place — the column-wise mirror of XShearImage.
    'degrees' is the shear factor (presumably tan(angle) from the callers —
    confirm); column x is displaced vertically by degrees*(x-width/2).
    Vacated pixels are filled with the background color blended by the
    fractional coverage 'area'.  Returns MagickTrue on success.
  */
  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    background;

  ssize_t
    x;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Capture the background color; convert if the image is CMYK.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(IndexPacket *) NULL,
    &background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  /*
    Y Shear image.  Columns are independent, so they are distributed across
    OpenMP threads.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    MagickPixelPacket
      pixel,
      source,
      destination;

    MagickRealType
      area,
      displacement;

    IndexPacket
      *magick_restrict indexes,
      *magick_restrict shear_indexes;

    ssize_t
      i;

    PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;  /* another column already failed; skip remaining work */
    /* fetch the whole column x_offset+x as a 1-pixel-wide strip */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    p+=y_offset;
    indexes+=y_offset;
    /*
      Columns left of the midline shift one way, columns right the other;
      'displacement' becomes the (non-negative) shift for this column.
    */
    displacement=degrees*(MagickRealType) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /*
      Split the displacement into a whole-pixel shift 'step' (rounded up by
      the step++) and a fractional remainder 'area' for area blending.
    */
    step=CastDoubleToLong(floor((double) displacement));
    area=(MagickRealType) (displacement-step);
    step++;
    pixel=background;
    GetMagickPixelPacket(image,&source);
    GetMagickPixelPacket(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;  /* shift would run off the top of the region */
        q=p-step;
        shear_indexes=indexes-step;
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              /* destination still above the region: just advance */
              SetMagickPixelPacket(image,++p,++indexes,&pixel);
              q++;
              shear_indexes++;
              continue;
            }
          /* blend previous pixel with the current source by 'area' */
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q++,shear_indexes++);
          SetMagickPixelPacket(image,p++,indexes++,&pixel);
        }
        /* fill the vacated bottom edge with background */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,q++,shear_indexes++);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,q++,shear_indexes++);
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height;
        indexes+=height;
        q=p+step;
        shear_indexes=indexes+step;
        for (i=0; i < (ssize_t) height; i++)
        {
          p--;
          indexes--;
          q--;
          shear_indexes--;
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;  /* destination is below the image: skip */
          SetMagickPixelPacket(image,p,indexes,&source);
          MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
            &source,(MagickRealType) GetPixelOpacity(p),area,&destination);
          SetPixelPacket(image,&destination,q,shear_indexes);
          SetMagickPixelPacket(image,p,indexes,&pixel);
        }
        /* fill the vacated top edge with background */
        MagickPixelCompositeAreaBlend(&pixel,(MagickRealType) pixel.opacity,
          &background,(MagickRealType) background.opacity,area,&destination);
        SetPixelPacket(image,&destination,--q,--shear_indexes);
        for (i=0; i < (step-1); i++)
          SetPixelPacket(image,&background,--q,--shear_indexes);
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): progress is reported against image->rows here,
           while XShearImage reports against its 'height' parameter */
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearImage() creates a new image that is a shear_image copy of an existing
% one. Shearing slides one edge of an image along the X or Y axis, creating
% a parallelogram. An X direction shear slides an edge along the X axis,
% while a Y direction shear slides an edge along the Y axis. The amount of
% the shear is controlled by a shear angle. For X direction shears, x_shear
% is measured relative to the Y axis, and similarly, for Y direction shears
% y_shear is measured relative to the X axis. Empty triangles left over from
% shearing the image are filled with the background color defined by member
% 'background_color' of the image.. ShearImage() allocates the memory
% necessary for the new Image structure and returns a pointer to the new image.
%
% ShearImage() is based on the paper "A Fast Algorithm for General Raster
% Rotatation" by Alan W. Paeth.
%
% The format of the ShearImage method is:
%
% Image *ShearImage(const Image *image,const double x_shear,
% const double y_shear,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  /*
    Shear 'image' by x_shear/y_shear degrees (Paeth two-pass shear):
    clone, pad with a background border large enough to hold the sheared
    result, run XShearImage then YShearImage, and crop to the final bounds.
    Returns a newly allocated image, or NULL on failure.

    Fix: the result of CropToFitImage is now checked *before* its fields are
    touched — CropToFitImage replaces the image and its fields must not be
    dereferenced on failure.
  */
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* tan() is discontinuous at odd multiples of 90 degrees */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* nothing to do: return the plain clone */
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute image size: how wide the X shear grows the image, and the
    border offsets needed so nothing shears off the canvas.
  */
  bounds.width=image->columns+CastDoubleToLong(floor(fabs(shear.x)*
    image->rows+0.5));
  bounds.x=CastDoubleToLong(ceil((double) image->columns+((fabs(shear.x)*
    image->rows)-image->columns)/2.0-0.5));
  bounds.y=CastDoubleToLong(ceil((double) image->rows+((fabs(shear.y)*
    bounds.width)-image->rows)/2.0-0.5));
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image: X pass, then Y pass.
  */
  if (shear_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel);
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  if ((status == MagickFalse) || (shear_image == (Image *) NULL))
    {
      /* CropToFitImage replaces the image; do not touch it on failure */
      if (shear_image != (Image *) NULL)
        shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  shear_image->matte=image->matte;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  return(shear_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h e a r R o t a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShearRotateImage() creates a new image that is a rotated copy of an existing
% one. Positive angles rotate counter-clockwise (right-hand rule), while
% negative angles rotate clockwise. Rotated images are usually larger than
% the originals and have 'empty' triangular corners. X axis. Empty
% triangles left over from shearing the image are filled with the background
% color defined by member 'background_color' of the image. ShearRotateImage
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% ShearRotateImage() is based on the paper "A Fast Algorithm for General
% Raster Rotatation" by Alan W. Paeth. ShearRotateImage is adapted from a
% similar method based on the Paeth paper written by Michael Halle of the
% Spatial Imaging Group, MIT Media Lab.
%
% The format of the ShearRotateImage method is:
%
% Image *ShearRotateImage(const Image *image,const double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o degrees: Specifies the number of degrees to rotate the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  /*
    Rotate 'image' by an arbitrary angle using Paeth's three-shear
    algorithm: reduce the angle to [-45,45] with 90-degree integral
    rotations, pad with a background border, then apply X, Y, X shears and
    crop to the final bounds.  Returns a newly allocated image, or NULL on
    failure.

    Fix: the result of CropToFitImage is now checked *before* its fields are
    touched — CropToFitImage replaces the image and its fields must not be
    dereferenced on failure.
  */
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,
    shear_width,
    width;

  /*
    Adjust rotation angle: reduce to (-45,45] plus 0-3 quarter turns.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.
  */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);  /* multiple of 90 degrees: already done */
  if (SetImageStorageClass(integral_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&integral_image->exception);
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=CastDoubleToLong(floor((double) ((shear_width > bounds.width) ?
    width : bounds.width-shear_width+2)/2.0+0.5));
  bounds.y=CastDoubleToLong(floor(((double) bounds.height-height+2)/2.0+0.5));
  /*
    Surround image with a border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image: X shear, Y shear, X shear.
  */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  if ((status == MagickFalse) || (rotate_image == (Image *) NULL))
    {
      /* CropToFitImage replaces the image; do not touch it on failure */
      if (rotate_image != (Image *) NULL)
        rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  rotate_image->matte=image->matte;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  return(rotate_image);
}
|
newton-bisection.c | #include "header.h"
/** Numerically calculates the derivative of the function
pointed to by fun_ptr using central difference. **/
/** Numerically approximates the derivative of the function pointed to by
    fun_ptr at x, using a central difference with a fixed step of 1e-6. **/
double derive(double (*fun_ptr)(double), double x) {
  const double step = 1e-6;
  double ahead  = (*fun_ptr)(x + step);
  double behind = (*fun_ptr)(x - step);
  return 1.0/(2.0*step) * (ahead - behind);
}
/** Finds all zeros in the interval [a, b]. This procedure
divides the interval [a, b] into N smaller subintervals and
then uses Newton-bisection on each of the subintervals. **/
/** Finds all zeros in the interval [a, b].  This procedure divides [a, b]
    into N equal subintervals and runs Newton-bisection (find_zero_NR) on
    each, in parallel over T OpenMP threads.  find_zero_NR's -1.0 sentinel
    marks subintervals with no zero; sentinels are compacted out in place.
    Returns a heap-allocated array (caller frees both ptr and the struct),
    or NULL on allocation failure. **/
array *find_all_zeros_NR(double (*fun_ptr)(double), double a, double b, int N, int T) {
  array *zeros = malloc(sizeof(array));
  if (zeros == NULL)
    return NULL;
  zeros->ptr = malloc(N*sizeof(double));
  if (zeros->ptr == NULL) {
    free(zeros);
    return NULL;
  }
  zeros->size = 0;
  double dn = (b-a)/((double) N);      /* subinterval width */
  int chunk = ceil(N/(double)T);       /* static chunk size per thread */
  #pragma omp parallel for num_threads(T) schedule(static, chunk)
  for (int i = 0; i < N; i++) {
    double xl = a + dn*i;              /* subinterval lower bound */
    double xu = a + dn*(i+1);          /* subinterval upper bound */
    zeros->ptr[i] = find_zero_NR(fun_ptr, xl, xu);
  }
  /* collect the found zeros: compact in place, dropping -1.0 sentinels */
  for (int i = 0; i < N; i++) {
    if (zeros->ptr[i] != -1) {
      zeros->ptr[zeros->size] = zeros->ptr[i];
      zeros->size++;
    }
  }
  /* shrink the buffer to the zeros actually found; if realloc fails (or
     no zeros were found, where realloc(p,0) is implementation-defined)
     keep the original, larger buffer — it is still valid */
  if (zeros->size > 0) {
    double *shrunk = realloc(zeros->ptr, zeros->size*sizeof(double));
    if (shrunk != NULL)
      zeros->ptr = shrunk;
  }
  return zeros;
}
/** Finds a zero, using Newton-bisection, in the interval [a, b] of the function
pointed to by fun_ptr. If no zero is found, the function returns -1.0. **/
/** Finds a zero, using Newton-bisection, in the interval [a, b] of the
    function pointed to by fun_ptr.  Newton iteration starts from the
    interval midpoint; whenever a Newton step leaves [a, b), one bisection
    step is taken instead (halving the bracket toward the sign change) and
    Newton restarts from the new midpoint.  Iteration stops when the Newton
    update or the bracket width drops below tol, or after max_iter
    iterations.  If no zero is found, the function returns -1.0; since -1.0
    doubles as the "not found" sentinel, a genuine zero at exactly -1.0 is
    nudged by 1e-12 before being returned. **/
double find_zero_NR(double (*fun_ptr)(double), double a, double b) {
  const int max_iter = 50;   // maximum number of iterations
  const double tol = 1e-8;   // tolerance
  int i = 0;                 // iteration counter
  double c = a + (b-a)*0.5;  // mid-point
  double x_new = c;          // x_k+1
  double x_old;              // x_k
  /* perform Newton-bisection while |x_k+1 - x_k| > tol, |b-a| > tol,
     and the maximum number of iterations has not been reached */
  do {
    x_old = x_new;
    x_new = x_old - (*fun_ptr)(x_old)/derive(fun_ptr, x_old);
    if (x_new < a || x_new >= b) { // Newton left the interval: bisect once
      if ((*fun_ptr)(a) * (*fun_ptr)(c) > 0) { // sign change lies in [c, b]
        a = c;
      } else {                                 // sign change lies in [a, c]
        b = c;
      }
      c = a + (b-a)*0.5;
      x_old = c;             // restart Newton from the new midpoint
      x_new = x_old - (*fun_ptr)(x_old)/derive(fun_ptr, x_old);
    }
    i++;
  } while (b-a > tol && fabs(x_new - x_old) > tol && i < max_iter);
  if (fabs(x_new - x_old) <= tol && (x_new >= a && x_new < b)) {
    // Newton converged inside the interval
    if (x_new == -1.0) { // add small perturbation if x_new == -1.0
      x_new += 1e-12;
    }
    return x_new;
  } else if (b-a <= tol && (*fun_ptr)(a) * (*fun_ptr)(b) < 0) {
    // bisection converged on a bracketed sign change: return the midpoint.
    // BUG FIX: perturb c (the value returned), not x_new as before — a
    // root at exactly -1.0 would otherwise be reported as "not found".
    if (c == -1.0) { // add small perturbation if c == -1.0
      c += 1e-12;
    }
    return c;
  } else {
    return -1.0;     // sentinel: no zero found in [a, b]
  }
}
|
residual.flux.c | //------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// This routines calculates the residual (res=rhs-Ax) using the linear operator specified in the apply_op_ijk macro
// This requires exchanging a ghost zone and/or enforcing a boundary condition.
// NOTE, x_id must be distinct from rhs_id and res_id
// Computes the residual res = rhs - A*x for the operator defined by the
// beta_dxdi/beta_dxdj/beta_dxdk (and, with USE_HELMHOLTZ, alpha) macros,
// in flux form: each face flux is computed once and then differenced, so
// the discrete operator is conservative and no flux is evaluated twice.
// The flux_k planes are double buffered so the k and k+1 faces are each
// built only once per k iteration ("pipelined").
// NOTE, x_id must be distinct from rhs_id and res_id
void residual(level_type * level, int res_id, int x_id, int rhs_id, double a, double b){
  // lazily allocate scratch space: 4 pencil-planes per thread
  // (flux_i, flux_j, and two ping-pong planes for flux_k), 64B aligned.
  // NOTE(review): posix_memalign's return value is ignored — on failure
  // level->fluxes stays NULL and the stores below would fault; confirm
  // callers treat allocation failure as fatal elsewhere.
  if(level->fluxes==NULL){posix_memalign( (void**)&(level->fluxes), 64, level->num_threads*(level->box_jStride)*(BLOCKCOPY_TILE_J+1)*(4)*sizeof(double) );}

  // exchange the boundary for x in prep for Ax...
  exchange_boundary(level,x_id,stencil_get_shape());
  apply_BCs(level,x_id,stencil_get_shape());

  // now do residual/restriction proper...
  double _timeStart = getTime();
  double h2inv = 1.0/(level->h*level->h);  // consumed by the stencil macros

  // loop over all block/tiles this process owns...
  #pragma omp parallel if(level->num_my_blocks>1)
  {
    int block;
    int threadID=0;if(level->num_my_blocks>1)threadID = omp_get_thread_num();
    // each thread carves its private 4-plane slice out of level->fluxes;
    // flux_k's slice spans two planes (slots 2 and 3) for double buffering
    double * __restrict__ flux_i = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 0);
    double * __restrict__ flux_j = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 1);
    double * __restrict__ flux_k = level->fluxes + (level->box_jStride)*(BLOCKCOPY_TILE_J+1)*( (threadID*4) + 2);
    // blocks are dealt round-robin across the threads
    for(block=threadID;block<level->num_my_blocks;block+=level->num_threads){
      const int box  = level->my_blocks[block].read.box;
      const int jlo  = level->my_blocks[block].read.j;
      const int klo  = level->my_blocks[block].read.k;
      const int idim = level->my_blocks[block].dim.i;
      const int jdim = level->my_blocks[block].dim.j;
      const int kdim = level->my_blocks[block].dim.k;

      const int ghosts  = level->my_boxes[box].ghosts;
      const int jStride = level->my_boxes[box].jStride;
      const int kStride = level->my_boxes[box].kStride;
      const int flux_kStride = (BLOCKCOPY_TILE_J+1)*level->box_jStride;

      // offset each vector so [0] is the first non ghost zone point of this tile
      const double * __restrict__ x      = level->my_boxes[box].vectors[         x_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride); // i.e. [0] = first non ghost zone point
      const double * __restrict__ rhs    = level->my_boxes[box].vectors[       rhs_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
      const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
      const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
      const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
      const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);
            double * __restrict__ res    = level->my_boxes[box].vectors[       res_id] + ghosts*(1+jStride+kStride) + (jlo*jStride + klo*kStride);

      // tell the compiler the arrays are aligned and strides are multiples
      // of the SIMD width so the ij loops vectorize without peel/remainder
      #ifdef __INTEL_COMPILER
      __assume_aligned(x     ,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(rhs   ,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(alpha ,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(beta_i,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(beta_j,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(beta_k,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(res   ,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(flux_i,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(flux_j,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume_aligned(flux_k,BOX_ALIGN_JSTRIDE*sizeof(double));
      __assume( (+jStride) % BOX_ALIGN_JSTRIDE == 0); // e.g. jStride%4==0 or jStride%8==0, hence x+jStride is aligned
      __assume( (-jStride) % BOX_ALIGN_JSTRIDE == 0);
      __assume( (+kStride) % BOX_ALIGN_KSTRIDE == 0);
      __assume( (-kStride) % BOX_ALIGN_KSTRIDE == 0);
      __assume(((jdim  )*jStride) % BOX_ALIGN_JSTRIDE == 0);
      __assume(((jdim+1)*jStride) % BOX_ALIGN_JSTRIDE == 0);
      __assume( (flux_kStride) % BOX_ALIGN_JSTRIDE == 0);
      #elif __xlC__
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), x     );
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), rhs   );
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), alpha );
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_i);
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_j);
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), beta_k);
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), res   );
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_i);
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_j);
      __alignx(BOX_ALIGN_JSTRIDE*sizeof(double), flux_k);
      #endif

      int i,j,k,ij;
      for(k=0;k<kdim;k++){
        // ping-pong buffers: the plane built as "khi" on iteration k
        // becomes "klo" on iteration k+1
        double * __restrict__ flux_klo = flux_k + ((k  )&0x1)*flux_kStride;
        double * __restrict__ flux_khi = flux_k + ((k+1)&0x1)*flux_kStride;
        #if (BLOCKCOPY_TILE_I != 10000)
        #error operators.flux.c cannot block the unit stride dimension (BLOCKCOPY_TILE_I!=10000).
        #endif
        // calculate fluxes (pipeline flux_k)...
        #if (_OPENMP>=201307)
        #pragma omp simd aligned(beta_i,x,flux_i:BOX_ALIGN_JSTRIDE*sizeof(double))
        #endif
        for(ij=0;ij<jdim*jStride;ij++){ // flux_i for jdim pencils...
          int ijk = ij + (k  )*kStride;
          flux_i[  ij] = beta_dxdi(x,ijk  );
        }
        #if (_OPENMP>=201307)
        #pragma omp simd aligned(beta_j,x,flux_j:BOX_ALIGN_JSTRIDE*sizeof(double))
        #endif
        for(ij=0;ij<(jdim+1)*jStride;ij++){ // flux_j for jdim+1 pencils...
          int ijk = ij + (k  )*kStride;
          flux_j[  ij] = beta_dxdj(x,ijk  );
        }
        if(k==0){ // startup / prolog for flux_k on jdim pencils...
        #if (_OPENMP>=201307)
        #pragma omp simd aligned(beta_k,x,flux_klo:BOX_ALIGN_JSTRIDE*sizeof(double))
        #endif
        for(ij=0;ij<jdim*jStride;ij++){
          int ijk = ij + 0;
          flux_klo[ij] = beta_dxdk(x,ijk);
        }}
        #if (_OPENMP>=201307)
        #pragma omp simd aligned(beta_k,x,flux_khi:BOX_ALIGN_JSTRIDE*sizeof(double))
        #endif
        for(ij=0;ij<jdim*jStride;ij++){ // for flux_k on jdim pencils...
          int ijk = ij + (k+1)*kStride;
          flux_khi[ij] = beta_dxdk(x,ijk); // flux_k needs k+1
        }
        // residual... one fused loop over all jdim pencils (including the
        // inter-pencil padding, whose results are presumably never read —
        // confirm) so it vectorizes as a single long stream
        #if (_OPENMP>=201307)
        #pragma omp simd aligned(flux_i,flux_j,flux_klo,flux_khi,alpha,rhs,x,res:BOX_ALIGN_JSTRIDE*sizeof(double))
        #endif
        for(ij=0;ij<(jdim-1)*jStride+idim;ij++){
          int ijk = ij + k*kStride;
          // Lx is the flux divergence: difference of the i, j, and k faces
          double Lx = - flux_i[  ij] + flux_i[  ij+      1]
                      - flux_j[  ij] + flux_j[  ij+jStride]
                      - flux_klo[ij] + flux_khi[ij        ];
          #ifdef USE_HELMHOLTZ
          double Ax = a*alpha[ijk]*x[ijk] - b*Lx;
          #else
          double Ax = -b*Lx;
          #endif
          res[ijk] = rhs[ijk]-Ax;
        }
      } // kdim
    } // block
  } // omp
  level->timers.residual += (double)(getTime()-_timeStart);
}
|
CCGSubSurf_legacy.c | /*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/blenkernel/intern/CCGSubSurf_legacy.c
* \ingroup bke
*/
#include "MEM_guardedalloc.h"
#include "BLI_sys_types.h" // for intptr_t support
#include "BLI_utildefines.h" /* for BLI_assert */
#include "BLI_math.h"
#include "CCGSubSurf.h"
#include "CCGSubSurf_intern.h"
#define FACE_calcIFNo(f, lvl, S, x, y, no) _face_calcIFNo(f, lvl, S, x, y, no, subdivLevels, vertDataSize)
/* TODO(sergey): Deduplicate the following functions/ */
/* Return a pointer to the level-'lvl' data element at position 'x' along
 * edge 'e', counted from vertex 'v'. When 'v' is the second endpoint the
 * index is mirrored so that x always walks away from 'v'. */
static void *_edge_getCoVert(CCGEdge *e, CCGVert *v, int lvl, int x, int dataSize)
{
	int levelBase = ccg_edgebase(lvl);
	const int index = (v == e->v0) ? (levelBase + x)
	                               : (levelBase + (1 << lvl) - x);
	return &EDGE_getLevelData(e)[dataSize * index];
}
/* *************************************************** */
/* An edge is a boundary edge when it has fewer than two incident faces. */
static int _edge_isBoundary(const CCGEdge *e)
{
	if (e->numFaces < 2) {
		return 1;
	}
	return 0;
}
/* A vertex lies on a boundary when any of its incident edges does. */
static int _vert_isBoundary(const CCGVert *v)
{
	int j;
	for (j = 0; j < v->numEdges; j++) {
		if (_edge_isBoundary(v->edges[j])) {
			return 1;
		}
	}
	return 0;
}
/* Given one endpoint of 'e', return the opposite endpoint.
 * Note: if 'vQ' is not an endpoint of 'e' at all, e->v0 is returned. */
static CCGVert *_edge_getOtherVert(CCGEdge *e, CCGVert *vQ)
{
	return (vQ == e->v0) ? e->v1 : e->v0;
}
/* Like ccg_face_getIFCoEdge(), but returns a pointer to the normal stored
 * alongside the coordinate (at byte offset 'normalDataOffset' inside the
 * per-point data record). */
static float *_face_getIFNoEdge(CCGFace *f,
                                CCGEdge *e,
                                int f_ed_idx,
                                int lvl,
                                int eX, int eY,
                                int levels,
                                int dataSize,
                                int normalDataOffset)
{
	byte *co = (byte *) ccg_face_getIFCoEdge(f, e, f_ed_idx, lvl, eX, eY, levels, dataSize);
	return (float *) (co + normalDataOffset);
}
/* Compute (into 'no') the normal of the grid quad whose lower corner is
 * (x, y) in sub-grid 'S' of face 'f' at level 'lvl': the cross product of
 * the quad's two diagonals, normalized. */
static void _face_calcIFNo(CCGFace *f,
                           int lvl,
                           int S,
                           int x, int y,
                           float no[3],
                           int levels,
                           int dataSize)
{
	const float *co00 = ccg_face_getIFCo(f, lvl, S, x + 0, y + 0, levels, dataSize);
	const float *co10 = ccg_face_getIFCo(f, lvl, S, x + 1, y + 0, levels, dataSize);
	const float *co11 = ccg_face_getIFCo(f, lvl, S, x + 1, y + 1, levels, dataSize);
	const float *co01 = ccg_face_getIFCo(f, lvl, S, x + 0, y + 1, levels, dataSize);
	float diag_a[3];  /* co00 -> co11 */
	float diag_b[3];  /* co10 -> co01 */
	int i;

	for (i = 0; i < 3; i++) {
		diag_a[i] = co11[i] - co00[i];
		diag_b[i] = co01[i] - co10[i];
	}

	no[0] = diag_b[1] * diag_a[2] - diag_b[2] * diag_a[1];
	no[1] = diag_b[2] * diag_a[0] - diag_b[0] * diag_a[2];
	no[2] = diag_b[0] * diag_a[1] - diag_b[1] * diag_a[0];

	Normalize(no);
}
/* True when the vertex carries the seam flag. */
static int VERT_seam(const CCGVert *v)
{
	return (v->flags & Vert_eSeam) ? 1 : 0;
}
/* Remaining crease ("sharpness") of edge 'e' at subdivision level 'lvl':
 * the crease value decays by one per level and is clamped at zero.
 * At level 0 the raw crease is returned unchanged. */
static float EDGE_getSharpness(CCGEdge *e, int lvl)
{
	if (lvl == 0) {
		return e->crease;
	}
	if (e->crease == 0.0f || e->crease - lvl < 0.0f) {
		return 0.0f;
	}
	return e->crease - lvl;
}
/* Recompute normals at the finest subdivision level for the affected
 * verts/edges/faces.
 *
 * Pass 1 (per face, parallel): zero the normal accumulators that will be
 * rebuilt, then accumulate each grid quad's (unnormalized) face normal into
 * the quad's four corner points, including the corners that live in a
 * neighbouring sub-grid of the same face.
 * Pass 2: average accumulated normals across elements shared by several
 * faces (vert corners, edge rows) so adjacent grids agree.
 * Pass 3 (per face, parallel): normalize everything and propagate results
 * to the duplicated border rows.
 * Pass 4: copy the agreed normals out to per-edge storage. */
static void ccgSubSurf__calcVertNormals(CCGSubSurf *ss,
                                        CCGVert **effectedV, CCGEdge **effectedE, CCGFace **effectedF,
                                        int numEffectedV, int numEffectedE, int numEffectedF)
{
	int i, ptrIdx;
	int subdivLevels = ss->subdivLevels;
	int lvl = ss->subdivLevels;  /* normals are computed at the finest level only */
	int edgeSize = ccg_edgesize(lvl);
	int gridSize = ccg_gridsize(lvl);
	int normalDataOffset = ss->normalDataOffset;
	int vertDataSize = ss->meshIFC.vertDataSize;

#pragma omp parallel for private(ptrIdx) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	for (ptrIdx = 0; ptrIdx < numEffectedF; ptrIdx++) {
		CCGFace *f = (CCGFace *) effectedF[ptrIdx];
		int S, x, y;
		float no[3];

		/* Zero the accumulators. Interior grid points are always rebuilt;
		 * the border rows/corners are only zeroed when the adjacent
		 * edge/vert is itself flagged as effected (otherwise their already
		 * accumulated values must be kept). */
		for (S = 0; S < f->numVerts; S++) {
			for (y = 0; y < gridSize - 1; y++) {
				for (x = 0; x < gridSize - 1; x++) {
					NormZero(FACE_getIFNo(f, lvl, S, x, y));
				}
			}

			if (FACE_getEdges(f)[(S - 1 + f->numVerts) % f->numVerts]->flags & Edge_eEffected) {
				for (x = 0; x < gridSize - 1; x++) {
					NormZero(FACE_getIFNo(f, lvl, S, x, gridSize - 1));
				}
			}
			if (FACE_getEdges(f)[S]->flags & Edge_eEffected) {
				for (y = 0; y < gridSize - 1; y++) {
					NormZero(FACE_getIFNo(f, lvl, S, gridSize - 1, y));
				}
			}
			if (FACE_getVerts(f)[S]->flags & Vert_eEffected) {
				NormZero(FACE_getIFNo(f, lvl, S, gridSize - 1, gridSize - 1));
			}
		}

		/* Accumulate quad normals into corner points. The *Limit flags
		 * suppress writes into border rows owned by non-effected
		 * edges (mirroring the selective zeroing above). */
		for (S = 0; S < f->numVerts; S++) {
			int yLimit = !(FACE_getEdges(f)[(S - 1 + f->numVerts) % f->numVerts]->flags & Edge_eEffected);
			int xLimit = !(FACE_getEdges(f)[S]->flags & Edge_eEffected);
			int yLimitNext = xLimit;
			int xLimitPrev = yLimit;

			for (y = 0; y < gridSize - 1; y++) {
				for (x = 0; x < gridSize - 1; x++) {
					int xPlusOk = (!xLimit || x < gridSize - 2);
					int yPlusOk = (!yLimit || y < gridSize - 2);

					FACE_calcIFNo(f, lvl, S, x, y, no);

					NormAdd(FACE_getIFNo(f, lvl, S, x + 0, y + 0), no);
					if (xPlusOk)
						NormAdd(FACE_getIFNo(f, lvl, S, x + 1, y + 0), no);
					if (yPlusOk)
						NormAdd(FACE_getIFNo(f, lvl, S, x + 0, y + 1), no);
					if (xPlusOk && yPlusOk) {
						if (x < gridSize - 2 || y < gridSize - 2 || FACE_getVerts(f)[S]->flags & Vert_eEffected) {
							NormAdd(FACE_getIFNo(f, lvl, S, x + 1, y + 1), no);
						}
					}

					/* Points on the sub-grid seams (x == 0 or y == 0) are
					 * duplicated in the neighbouring sub-grids of this
					 * face; accumulate into those copies as well. */
					if (x == 0 && y == 0) {
						int K;

						if (!yLimitNext || 1 < gridSize - 1)
							NormAdd(FACE_getIFNo(f, lvl, (S + 1) % f->numVerts, 0, 1), no);
						if (!xLimitPrev || 1 < gridSize - 1)
							NormAdd(FACE_getIFNo(f, lvl, (S - 1 + f->numVerts) % f->numVerts, 1, 0), no);

						for (K = 0; K < f->numVerts; K++) {
							if (K != S) {
								NormAdd(FACE_getIFNo(f, lvl, K, 0, 0), no);
							}
						}
					}
					else if (y == 0) {
						NormAdd(FACE_getIFNo(f, lvl, (S + 1) % f->numVerts, 0, x), no);
						if (!yLimitNext || x < gridSize - 2)
							NormAdd(FACE_getIFNo(f, lvl, (S + 1) % f->numVerts, 0, x + 1), no);
					}
					else if (x == 0) {
						NormAdd(FACE_getIFNo(f, lvl, (S - 1 + f->numVerts) % f->numVerts, y, 0), no);
						if (!xLimitPrev || y < gridSize - 2)
							NormAdd(FACE_getIFNo(f, lvl, (S - 1 + f->numVerts) % f->numVerts, y + 1, 0), no);
					}
				}
			}
		}
	}

	/* XXX can I reduce the number of normalisations here? */
	for (ptrIdx = 0; ptrIdx < numEffectedV; ptrIdx++) {
		CCGVert *v = (CCGVert *) effectedV[ptrIdx];
		float *no = VERT_getNo(v, lvl);

		NormZero(no);

		/* Sum the corner accumulators of every face incident to v ... */
		for (i = 0; i < v->numFaces; i++) {
			CCGFace *f = v->faces[i];
			NormAdd(no, FACE_getIFNo(f, lvl, ccg_face_getVertIndex(f, v), gridSize - 1, gridSize - 1));
		}

		/* Wire verts (no faces): fall back to the (normalized) coordinate
		 * so the normal is at least deterministic, not uninitialized. */
		if (UNLIKELY(v->numFaces == 0)) {
			NormCopy(no, VERT_getCo(v, lvl));
		}

		Normalize(no);

		/* ... then write the shared result back into every face corner. */
		for (i = 0; i < v->numFaces; i++) {
			CCGFace *f = v->faces[i];
			NormCopy(FACE_getIFNo(f, lvl, ccg_face_getVertIndex(f, v), gridSize - 1, gridSize - 1), no);
		}
	}

	/* Merge edge-row accumulators across all faces sharing each edge:
	 * first sum everything into the last face's row, then copy the sum
	 * back to the other faces. */
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = (CCGEdge *) effectedE[ptrIdx];

		if (e->numFaces) {
			CCGFace *fLast = e->faces[e->numFaces - 1];
			int x;

			for (i = 0; i < e->numFaces - 1; i++) {
				CCGFace *f = e->faces[i];
				const int f_ed_idx = ccg_face_getEdgeIndex(f, e);
				const int f_ed_idx_last = ccg_face_getEdgeIndex(fLast, e);

				for (x = 1; x < edgeSize - 1; x++) {
					NormAdd(_face_getIFNoEdge(fLast, e, f_ed_idx_last, lvl, x, 0, subdivLevels, vertDataSize, normalDataOffset),
					        _face_getIFNoEdge(f, e, f_ed_idx, lvl, x, 0, subdivLevels, vertDataSize, normalDataOffset));
				}
			}

			for (i = 0; i < e->numFaces - 1; i++) {
				CCGFace *f = e->faces[i];
				const int f_ed_idx = ccg_face_getEdgeIndex(f, e);
				const int f_ed_idx_last = ccg_face_getEdgeIndex(fLast, e);

				for (x = 1; x < edgeSize - 1; x++) {
					NormCopy(_face_getIFNoEdge(f, e, f_ed_idx, lvl, x, 0, subdivLevels, vertDataSize, normalDataOffset),
					         _face_getIFNoEdge(fLast, e, f_ed_idx_last, lvl, x, 0, subdivLevels, vertDataSize, normalDataOffset));
				}
			}
		}
	}

#pragma omp parallel for private(ptrIdx) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	for (ptrIdx = 0; ptrIdx < numEffectedF; ptrIdx++) {
		CCGFace *f = (CCGFace *) effectedF[ptrIdx];
		int S, x, y;

		/* Mirror each sub-grid's bottom row into the next sub-grid's
		 * left column (the two are the same points on the face). */
		for (S = 0; S < f->numVerts; S++) {
			NormCopy(FACE_getIFNo(f, lvl, (S + 1) % f->numVerts, 0, gridSize - 1),
			         FACE_getIFNo(f, lvl, S, gridSize - 1, 0));
		}

		for (S = 0; S < f->numVerts; S++) {
			for (y = 0; y < gridSize; y++) {
				for (x = 0; x < gridSize; x++) {
					float *no = FACE_getIFNo(f, lvl, S, x, y);
					Normalize(no);
				}
			}

			/* Face-center normal: taken from the (0, 0) grid point. */
			VertDataCopy((float *)((byte *)FACE_getCenterData(f) + normalDataOffset),
			             FACE_getIFNo(f, lvl, S, 0, 0), ss);

			for (x = 1; x < gridSize - 1; x++)
				NormCopy(FACE_getIENo(f, lvl, S, x),
				         FACE_getIFNo(f, lvl, S, x, 0));
		}
	}

	/* Publish per-edge normals from the first incident face. */
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = (CCGEdge *) effectedE[ptrIdx];

		if (e->numFaces) {
			CCGFace *f = e->faces[0];
			int x;
			const int f_ed_idx = ccg_face_getEdgeIndex(f, e);

			for (x = 0; x < edgeSize; x++)
				NormCopy(EDGE_getNo(e, lvl, x),
				         _face_getIFNoEdge(f, e, f_ed_idx, lvl, x, 0, subdivLevels, vertDataSize, normalDataOffset));
		}
		else {
			/* set to zero here otherwise the normals are uninitialized memory
			 * render: tests/animation/knight.blend with valgrind.
			 * we could be more clever and interpolate vertex normals but these are
			 * most likely not used so just zero out. */
			/* NOTE(review): the comment above says "zero out" but the code
			 * actually copies the coordinate and normalizes it -- confirm
			 * which is intended. */
			int x;

			for (x = 0; x < edgeSize; x++) {
				float *no = EDGE_getNo(e, lvl, x);
				NormCopy(no, EDGE_getCo(e, lvl, x));
				Normalize(no);
			}
		}
	}
}
/* Compute subdivision level 'curLvl + 1' from level 'curLvl' for the
 * affected elements: one smoothing/refinement step of the (Catmull-Clark
 * style) scheme used by this module.
 *
 * Order matters throughout: midpoints at the next level are written first,
 * because the subsequent "shift" passes read them as stencil inputs.
 * 'q' and 'r' are shared scratch buffers (ss->q / ss->r) sized to one
 * vertex-data record; the final OpenMP region allocates per-thread copies
 * instead, since that loop runs in parallel. */
static void ccgSubSurf__calcSubdivLevel(
        CCGSubSurf *ss,
        CCGVert **effectedV, CCGEdge **effectedE, CCGFace **effectedF,
        const int numEffectedV, const int numEffectedE, const int numEffectedF, const int curLvl)
{
	const int subdivLevels = ss->subdivLevels;
	const int nextLvl = curLvl + 1;
	int edgeSize = ccg_edgesize(curLvl);   /* sizes at curLvl; rebound to nextLvl at "copy down" */
	int gridSize = ccg_gridsize(curLvl);
	int ptrIdx, i;
	int vertDataSize = ss->meshIFC.vertDataSize;
	float *q = ss->q, *r = ss->r;

#pragma omp parallel for private(ptrIdx) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	for (ptrIdx = 0; ptrIdx < numEffectedF; ptrIdx++) {
		CCGFace *f = (CCGFace *) effectedF[ptrIdx];
		int S, x, y;

		/* interior face midpoints
		 * - old interior face points
		 */
		for (S = 0; S < f->numVerts; S++) {
			for (y = 0; y < gridSize - 1; y++) {
				for (x = 0; x < gridSize - 1; x++) {
					int fx = 1 + 2 * x;  /* next-level index of the quad midpoint */
					int fy = 1 + 2 * y;
					const float *co0 = FACE_getIFCo(f, curLvl, S, x + 0, y + 0);
					const float *co1 = FACE_getIFCo(f, curLvl, S, x + 1, y + 0);
					const float *co2 = FACE_getIFCo(f, curLvl, S, x + 1, y + 1);
					const float *co3 = FACE_getIFCo(f, curLvl, S, x + 0, y + 1);
					float *co = FACE_getIFCo(f, nextLvl, S, fx, fy);

					VertDataAvg4(co, co0, co1, co2, co3, ss);
				}
			}
		}

		/* interior edge midpoints
		 * - old interior edge points
		 * - new interior face midpoints
		 */
		for (S = 0; S < f->numVerts; S++) {
			for (x = 0; x < gridSize - 1; x++) {
				int fx = x * 2 + 1;
				const float *co0 = FACE_getIECo(f, curLvl, S, x + 0);
				const float *co1 = FACE_getIECo(f, curLvl, S, x + 1);
				/* the two face midpoints flanking this interior-edge
				 * segment live in sub-grids S and S + 1 */
				const float *co2 = FACE_getIFCo(f, nextLvl, (S + 1) % f->numVerts, 1, fx);
				const float *co3 = FACE_getIFCo(f, nextLvl, S, fx, 1);
				float *co = FACE_getIECo(f, nextLvl, S, fx);

				VertDataAvg4(co, co0, co1, co2, co3, ss);
			}

			/* interior face interior edge midpoints
			 * - old interior face points
			 * - new interior face midpoints
			 */

			/* vertical */
			for (x = 1; x < gridSize - 1; x++) {
				for (y = 0; y < gridSize - 1; y++) {
					int fx = x * 2;
					int fy = y * 2 + 1;
					const float *co0 = FACE_getIFCo(f, curLvl, S, x, y + 0);
					const float *co1 = FACE_getIFCo(f, curLvl, S, x, y + 1);
					const float *co2 = FACE_getIFCo(f, nextLvl, S, fx - 1, fy);
					const float *co3 = FACE_getIFCo(f, nextLvl, S, fx + 1, fy);
					float *co = FACE_getIFCo(f, nextLvl, S, fx, fy);

					VertDataAvg4(co, co0, co1, co2, co3, ss);
				}
			}

			/* horizontal */
			for (y = 1; y < gridSize - 1; y++) {
				for (x = 0; x < gridSize - 1; x++) {
					int fx = x * 2 + 1;
					int fy = y * 2;
					const float *co0 = FACE_getIFCo(f, curLvl, S, x + 0, y);
					const float *co1 = FACE_getIFCo(f, curLvl, S, x + 1, y);
					const float *co2 = FACE_getIFCo(f, nextLvl, S, fx, fy - 1);
					const float *co3 = FACE_getIFCo(f, nextLvl, S, fx, fy + 1);
					float *co = FACE_getIFCo(f, nextLvl, S, fx, fy);

					VertDataAvg4(co, co0, co1, co2, co3, ss);
				}
			}
		}
	}

	/* exterior edge midpoints
	 * - old exterior edge points
	 * - new interior face midpoints
	 */
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = (CCGEdge *) effectedE[ptrIdx];
		float sharpness = EDGE_getSharpness(e, curLvl);
		int x, j;

		if (_edge_isBoundary(e) || sharpness > 1.0f) {
			/* boundary / fully sharp edge: plain midpoint of the segment */
			for (x = 0; x < edgeSize - 1; x++) {
				int fx = x * 2 + 1;
				const float *co0 = EDGE_getCo(e, curLvl, x + 0);
				const float *co1 = EDGE_getCo(e, curLvl, x + 1);
				float *co = EDGE_getCo(e, nextLvl, fx);

				VertDataCopy(co, co0, ss);
				VertDataAdd(co, co1, ss);
				VertDataMulN(co, 0.5f, ss);
			}
		}
		else {
			/* smooth edge: average endpoints with adjacent face midpoints,
			 * then blend toward the plain midpoint by 'sharpness' */
			for (x = 0; x < edgeSize - 1; x++) {
				int fx = x * 2 + 1;
				const float *co0 = EDGE_getCo(e, curLvl, x + 0);
				const float *co1 = EDGE_getCo(e, curLvl, x + 1);
				float *co = EDGE_getCo(e, nextLvl, fx);
				int numFaces = 0;

				VertDataCopy(q, co0, ss);
				VertDataAdd(q, co1, ss);

				for (j = 0; j < e->numFaces; j++) {
					CCGFace *f = e->faces[j];
					const int f_ed_idx = ccg_face_getEdgeIndex(f, e);
					VertDataAdd(q, ccg_face_getIFCoEdge(f, e, f_ed_idx, nextLvl, fx, 1, subdivLevels, vertDataSize), ss);
					numFaces++;
				}

				VertDataMulN(q, 1.0f / (2.0f + numFaces), ss);

				VertDataCopy(r, co0, ss);
				VertDataAdd(r, co1, ss);
				VertDataMulN(r, 0.5f, ss);

				VertDataCopy(co, q, ss);
				VertDataSub(r, q, ss);
				VertDataMulN(r, sharpness, ss);
				VertDataAdd(co, r, ss);
			}
		}
	}

	/* exterior vertex shift
	 * - old vertex points (shifting)
	 * - old exterior edge points
	 * - new interior face midpoints
	 */
	for (ptrIdx = 0; ptrIdx < numEffectedV; ptrIdx++) {
		CCGVert *v = (CCGVert *) effectedV[ptrIdx];
		const float *co = VERT_getCo(v, curLvl);
		float *nCo = VERT_getCo(v, nextLvl);
		int sharpCount = 0, allSharp = 1;
		float avgSharpness = 0.0;
		int j, seam = VERT_seam(v), seamEdges = 0;

		for (j = 0; j < v->numEdges; j++) {
			CCGEdge *e = v->edges[j];
			float sharpness = EDGE_getSharpness(e, curLvl);

			if (seam && _edge_isBoundary(e))
				seamEdges++;

			if (sharpness != 0.0f) {
				sharpCount++;
				avgSharpness += sharpness;
			}
			else {
				allSharp = 0;
			}
		}

		if (sharpCount) {
			avgSharpness /= sharpCount;
			if (avgSharpness > 1.0f) {
				avgSharpness = 1.0f;
			}
		}

		/* a seam vertex needs at least two boundary edges, and all of its
		 * edges must be boundary edges, to actually be treated as a seam */
		if (seamEdges < 2 || seamEdges != v->numEdges)
			seam = 0;

		if (!v->numEdges || ss->meshIFC.simpleSubdiv) {
			/* isolated vertex, or simple (non-smoothing) subdivision */
			VertDataCopy(nCo, co, ss);
		}
		else if (_vert_isBoundary(v)) {
			/* boundary rule: 3/4 old position + 1/4 average of the
			 * adjacent boundary-edge points */
			int numBoundary = 0;

			VertDataZero(r, ss);
			for (j = 0; j < v->numEdges; j++) {
				CCGEdge *e = v->edges[j];
				if (_edge_isBoundary(e)) {
					VertDataAdd(r, _edge_getCoVert(e, v, curLvl, 1, vertDataSize), ss);
					numBoundary++;
				}
			}

			VertDataCopy(nCo, co, ss);
			VertDataMulN(nCo, 0.75f, ss);
			VertDataMulN(r, 0.25f / numBoundary, ss);
			VertDataAdd(nCo, r, ss);
		}
		else {
			/* smooth interior rule:
			 * nCo = (face-avg + edge-avg + (n - 2) * co) / n */
			const int cornerIdx = (1 + (1 << (curLvl))) - 2;
			int numEdges = 0, numFaces = 0;

			VertDataZero(q, ss);
			for (j = 0; j < v->numFaces; j++) {
				CCGFace *f = v->faces[j];
				VertDataAdd(q, FACE_getIFCo(f, nextLvl, ccg_face_getVertIndex(f, v), cornerIdx, cornerIdx), ss);
				numFaces++;
			}

			VertDataMulN(q, 1.0f / numFaces, ss);

			VertDataZero(r, ss);
			for (j = 0; j < v->numEdges; j++) {
				CCGEdge *e = v->edges[j];
				VertDataAdd(r, _edge_getCoVert(e, v, curLvl, 1, vertDataSize), ss);
				numEdges++;
			}
			VertDataMulN(r, 1.0f / numEdges, ss);

			VertDataCopy(nCo, co, ss);
			VertDataMulN(nCo, numEdges - 2.0f, ss);
			VertDataAdd(nCo, q, ss);
			VertDataAdd(nCo, r, ss);
			VertDataMulN(nCo, 1.0f / numEdges, ss);
		}

		/* creases/seams: blend the smooth result toward the crease rule */
		if ((sharpCount > 1 && v->numFaces) || seam) {
			VertDataZero(q, ss);

			if (seam) {
				avgSharpness = 1.0f;
				sharpCount = seamEdges;
				allSharp = 1;
			}

			for (j = 0; j < v->numEdges; j++) {
				CCGEdge *e = v->edges[j];
				float sharpness = EDGE_getSharpness(e, curLvl);

				if (seam) {
					if (_edge_isBoundary(e))
						VertDataAdd(q, _edge_getCoVert(e, v, curLvl, 1, vertDataSize), ss);
				}
				else if (sharpness != 0.0f) {
					VertDataAdd(q, _edge_getCoVert(e, v, curLvl, 1, vertDataSize), ss);
				}
			}

			VertDataMulN(q, (float) 1 / sharpCount, ss);

			if (sharpCount != 2 || allSharp) {
				/* q = q + (co - q) * avgSharpness */
				VertDataCopy(r, co, ss);
				VertDataSub(r, q, ss);
				VertDataMulN(r, avgSharpness, ss);
				VertDataAdd(q, r, ss);
			}

			/* r = co * 0.75 + q * 0.25 */
			VertDataCopy(r, co, ss);
			VertDataMulN(r, 0.75f, ss);
			VertDataMulN(q, 0.25f, ss);
			VertDataAdd(r, q, ss);

			/* nCo = nCo + (r - nCo) * avgSharpness */
			VertDataSub(r, nCo, ss);
			VertDataMulN(r, avgSharpness, ss);
			VertDataAdd(nCo, r, ss);
		}
	}

	/* exterior edge interior shift
	 * - old exterior edge midpoints (shifting)
	 * - old exterior edge midpoints
	 * - new interior face midpoints
	 */
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = (CCGEdge *) effectedE[ptrIdx];
		float sharpness = EDGE_getSharpness(e, curLvl);
		int sharpCount = 0;
		float avgSharpness = 0.0;
		int x, j;

		if (sharpness != 0.0f) {
			sharpCount = 2;  /* an edge point always sees two crease segments */
			avgSharpness += sharpness;

			if (avgSharpness > 1.0f) {
				avgSharpness = 1.0f;
			}
		}
		else {
			sharpCount = 0;
			avgSharpness = 0;
		}

		if (_edge_isBoundary(e)) {
			/* boundary rule: 3/4 old point + 1/4 neighbour average */
			for (x = 1; x < edgeSize - 1; x++) {
				int fx = x * 2;
				const float *co = EDGE_getCo(e, curLvl, x);
				float *nCo = EDGE_getCo(e, nextLvl, fx);

				/* Average previous level's endpoints */
				VertDataCopy(r, EDGE_getCo(e, curLvl, x - 1), ss);
				VertDataAdd(r, EDGE_getCo(e, curLvl, x + 1), ss);
				VertDataMulN(r, 0.5f, ss);

				/* nCo = nCo * 0.75 + r * 0.25 */
				VertDataCopy(nCo, co, ss);
				VertDataMulN(nCo, 0.75f, ss);
				VertDataMulN(r, 0.25f, ss);
				VertDataAdd(nCo, r, ss);
			}
		}
		else {
			/* interior rule with optional crease blend */
			for (x = 1; x < edgeSize - 1; x++) {
				int fx = x * 2;
				const float *co = EDGE_getCo(e, curLvl, x);
				float *nCo = EDGE_getCo(e, nextLvl, fx);
				int numFaces = 0;

				VertDataZero(q, ss);
				VertDataZero(r, ss);
				VertDataAdd(r, EDGE_getCo(e, curLvl, x - 1), ss);
				VertDataAdd(r, EDGE_getCo(e, curLvl, x + 1), ss);

				for (j = 0; j < e->numFaces; j++) {
					CCGFace *f = e->faces[j];
					int f_ed_idx = ccg_face_getEdgeIndex(f, e);
					VertDataAdd(q, ccg_face_getIFCoEdge(f, e, f_ed_idx, nextLvl, fx - 1, 1, subdivLevels, vertDataSize), ss);
					VertDataAdd(q, ccg_face_getIFCoEdge(f, e, f_ed_idx, nextLvl, fx + 1, 1, subdivLevels, vertDataSize), ss);
					VertDataAdd(r, ccg_face_getIFCoEdge(f, e, f_ed_idx, curLvl, x, 1, subdivLevels, vertDataSize), ss);
					numFaces++;
				}

				VertDataMulN(q, 1.0f / (numFaces * 2.0f), ss);
				VertDataMulN(r, 1.0f / (2.0f + numFaces), ss);

				VertDataCopy(nCo, co, ss);
				VertDataMulN(nCo, (float) numFaces, ss);
				VertDataAdd(nCo, q, ss);
				VertDataAdd(nCo, r, ss);
				VertDataMulN(nCo, 1.0f / (2 + numFaces), ss);

				if (sharpCount == 2) {
					/* crease rule: 1/8 (co * 6 + neighbours), blended in
					 * by avgSharpness */
					VertDataCopy(q, co, ss);
					VertDataMulN(q, 6.0f, ss);
					VertDataAdd(q, EDGE_getCo(e, curLvl, x - 1), ss);
					VertDataAdd(q, EDGE_getCo(e, curLvl, x + 1), ss);
					VertDataMulN(q, 1 / 8.0f, ss);

					VertDataSub(q, nCo, ss);
					VertDataMulN(q, avgSharpness, ss);
					VertDataAdd(nCo, q, ss);
				}
			}
		}
	}

#pragma omp parallel private(ptrIdx) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	{
		float *q_thread, *r_thread;

		/* per-thread scratch -- the shared ss->q/ss->r cannot be used in
		 * parallel; MEM_mallocN is called under 'critical' (presumably the
		 * allocator is not thread-safe here -- see guarded free below) */
#pragma omp critical
		{
			q_thread = MEM_mallocN(ss->meshIFC.vertDataSize, "CCGSubsurf q");
			r_thread = MEM_mallocN(ss->meshIFC.vertDataSize, "CCGSubsurf r");
		}

#pragma omp for schedule(static)
		for (ptrIdx = 0; ptrIdx < numEffectedF; ptrIdx++) {
			CCGFace *f = (CCGFace *) effectedF[ptrIdx];
			int S, x, y;

			/* interior center point shift
			 * - old face center point (shifting)
			 * - old interior edge points
			 * - new interior face midpoints
			 */
			VertDataZero(q_thread, ss);
			for (S = 0; S < f->numVerts; S++) {
				VertDataAdd(q_thread, FACE_getIFCo(f, nextLvl, S, 1, 1), ss);
			}
			VertDataMulN(q_thread, 1.0f / f->numVerts, ss);
			VertDataZero(r_thread, ss);
			for (S = 0; S < f->numVerts; S++) {
				VertDataAdd(r_thread, FACE_getIECo(f, curLvl, S, 1), ss);
			}
			VertDataMulN(r_thread, 1.0f / f->numVerts, ss);

			VertDataMulN((float *)FACE_getCenterData(f), f->numVerts - 2.0f, ss);
			VertDataAdd((float *)FACE_getCenterData(f), q_thread, ss);
			VertDataAdd((float *)FACE_getCenterData(f), r_thread, ss);
			VertDataMulN((float *)FACE_getCenterData(f), 1.0f / f->numVerts, ss);

			for (S = 0; S < f->numVerts; S++) {
				/* interior face shift
				 * - old interior face point (shifting)
				 * - new interior edge midpoints
				 * - new interior face midpoints
				 */
				for (x = 1; x < gridSize - 1; x++) {
					for (y = 1; y < gridSize - 1; y++) {
						int fx = x * 2;
						int fy = y * 2;
						const float *co = FACE_getIFCo(f, curLvl, S, x, y);
						float *nCo = FACE_getIFCo(f, nextLvl, S, fx, fy);

						VertDataAvg4(q_thread,
						             FACE_getIFCo(f, nextLvl, S, fx - 1, fy - 1),
						             FACE_getIFCo(f, nextLvl, S, fx + 1, fy - 1),
						             FACE_getIFCo(f, nextLvl, S, fx + 1, fy + 1),
						             FACE_getIFCo(f, nextLvl, S, fx - 1, fy + 1),
						             ss);

						VertDataAvg4(r_thread,
						             FACE_getIFCo(f, nextLvl, S, fx - 1, fy + 0),
						             FACE_getIFCo(f, nextLvl, S, fx + 1, fy + 0),
						             FACE_getIFCo(f, nextLvl, S, fx + 0, fy - 1),
						             FACE_getIFCo(f, nextLvl, S, fx + 0, fy + 1),
						             ss);

						VertDataCopy(nCo, co, ss);
						VertDataSub(nCo, q_thread, ss);
						VertDataMulN(nCo, 0.25f, ss);
						VertDataAdd(nCo, r_thread, ss);
					}
				}

				/* interior edge interior shift
				 * - old interior edge point (shifting)
				 * - new interior edge midpoints
				 * - new interior face midpoints
				 */
				for (x = 1; x < gridSize - 1; x++) {
					int fx = x * 2;
					const float *co = FACE_getIECo(f, curLvl, S, x);
					float *nCo = FACE_getIECo(f, nextLvl, S, fx);

					VertDataAvg4(q_thread,
					             FACE_getIFCo(f, nextLvl, (S + 1) % f->numVerts, 1, fx - 1),
					             FACE_getIFCo(f, nextLvl, (S + 1) % f->numVerts, 1, fx + 1),
					             FACE_getIFCo(f, nextLvl, S, fx + 1, +1),
					             FACE_getIFCo(f, nextLvl, S, fx - 1, +1), ss);

					VertDataAvg4(r_thread,
					             FACE_getIECo(f, nextLvl, S, fx - 1),
					             FACE_getIECo(f, nextLvl, S, fx + 1),
					             FACE_getIFCo(f, nextLvl, (S + 1) % f->numVerts, 1, fx),
					             FACE_getIFCo(f, nextLvl, S, fx, 1),
					             ss);

					VertDataCopy(nCo, co, ss);
					VertDataSub(nCo, q_thread, ss);
					VertDataMulN(nCo, 0.25f, ss);
					VertDataAdd(nCo, r_thread, ss);
				}
			}
		}

#pragma omp critical
		{
			MEM_freeN(q_thread);
			MEM_freeN(r_thread);
		}
	}

	/* copy down */
	edgeSize = ccg_edgesize(nextLvl);
	gridSize = ccg_gridsize(nextLvl);
	const int cornerIdx = gridSize - 1;

#pragma omp parallel for private(i) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	for (i = 0; i < numEffectedE; i++) {
		CCGEdge *e = effectedE[i];
		VertDataCopy(EDGE_getCo(e, nextLvl, 0), VERT_getCo(e->v0, nextLvl), ss);
		VertDataCopy(EDGE_getCo(e, nextLvl, edgeSize - 1), VERT_getCo(e->v1, nextLvl), ss);
	}

#pragma omp parallel for private(i) if (numEffectedF * edgeSize * edgeSize * 4 >= CCG_OMP_LIMIT)
	for (i = 0; i < numEffectedF; i++) {
		CCGFace *f = effectedF[i];
		int S, x;

		for (S = 0; S < f->numVerts; S++) {
			CCGEdge *e = FACE_getEdges(f)[S];
			CCGEdge *prevE = FACE_getEdges(f)[(S + f->numVerts - 1) % f->numVerts];

			/* duplicate center / corner / edge data into the grid borders
			 * so each sub-grid is self-contained at nextLvl */
			VertDataCopy(FACE_getIFCo(f, nextLvl, S, 0, 0), (float *)FACE_getCenterData(f), ss);
			VertDataCopy(FACE_getIECo(f, nextLvl, S, 0), (float *)FACE_getCenterData(f), ss);

			VertDataCopy(FACE_getIFCo(f, nextLvl, S, cornerIdx, cornerIdx), VERT_getCo(FACE_getVerts(f)[S], nextLvl), ss);
			VertDataCopy(FACE_getIECo(f, nextLvl, S, cornerIdx), EDGE_getCo(FACE_getEdges(f)[S], nextLvl, cornerIdx), ss);

			for (x = 1; x < gridSize - 1; x++) {
				float *co = FACE_getIECo(f, nextLvl, S, x);
				VertDataCopy(FACE_getIFCo(f, nextLvl, S, x, 0), co, ss);
				VertDataCopy(FACE_getIFCo(f, nextLvl, (S + 1) % f->numVerts, 0, x), co, ss);
			}

			for (x = 0; x < gridSize - 1; x++) {
				int eI = gridSize - 1 - x;
				VertDataCopy(FACE_getIFCo(f, nextLvl, S, cornerIdx, x), _edge_getCoVert(e, FACE_getVerts(f)[S], nextLvl, eI, vertDataSize), ss);
				VertDataCopy(FACE_getIFCo(f, nextLvl, S, x, cornerIdx), _edge_getCoVert(prevE, FACE_getVerts(f)[S], nextLvl, eI, vertDataSize), ss);
			}
		}
	}
}
/* Full legacy sync: rebuild all subdivision levels for the elements
 * flagged as effected.
 *
 * Steps:
 * 1. Collect every effected vert, plus all edges/faces touching one
 *    (marking them effected so they are collected only once).
 * 2. Seed level 0 -> 1 explicitly: face centers, edge midpoints (with the
 *    crease/sharpness rule), then the vertex rules (boundary / smooth /
 *    crease-seam blend). At level 1 edges have 3 points and grids are
 *    2x2, which is why fixed indices 0..2 and (1, 1) appear below.
 * 3. Run ccgSubSurf__calcSubdivLevel() for levels 1 .. subdivLevels-1 and
 *    optionally recompute normals.
 * 4. Clear the effected flags. */
void ccgSubSurf__sync_legacy(CCGSubSurf *ss)
{
	CCGVert **effectedV;
	CCGEdge **effectedE;
	CCGFace **effectedF;
	int numEffectedV, numEffectedE, numEffectedF;
	int subdivLevels = ss->subdivLevels;
	int vertDataSize = ss->meshIFC.vertDataSize;
	int i, j, ptrIdx, S;
	int curLvl, nextLvl;
	void *q = ss->q, *r = ss->r;  /* shared one-record scratch buffers */

	/* upper bound: every element in the maps may be effected */
	effectedV = MEM_mallocN(sizeof(*effectedV) * ss->vMap->numEntries, "CCGSubsurf effectedV");
	effectedE = MEM_mallocN(sizeof(*effectedE) * ss->eMap->numEntries, "CCGSubsurf effectedE");
	effectedF = MEM_mallocN(sizeof(*effectedF) * ss->fMap->numEntries, "CCGSubsurf effectedF");

	/* gather effected verts and their incident edges/faces; the flag set
	 * here doubles as a "already collected" marker */
	numEffectedV = numEffectedE = numEffectedF = 0;
	for (i = 0; i < ss->vMap->curSize; i++) {
		CCGVert *v = (CCGVert *) ss->vMap->buckets[i];

		for (; v; v = v->next) {
			if (v->flags & Vert_eEffected) {
				effectedV[numEffectedV++] = v;

				for (j = 0; j < v->numEdges; j++) {
					CCGEdge *e = v->edges[j];
					if (!(e->flags & Edge_eEffected)) {
						effectedE[numEffectedE++] = e;
						e->flags |= Edge_eEffected;
					}
				}

				for (j = 0; j < v->numFaces; j++) {
					CCGFace *f = v->faces[j];
					if (!(f->flags & Face_eEffected)) {
						effectedF[numEffectedF++] = f;
						f->flags |= Face_eEffected;
					}
				}
			}
		}
	}

	curLvl = 0;
	nextLvl = curLvl + 1;

	/* face centers: average of the face's level-0 vertices */
	for (ptrIdx = 0; ptrIdx < numEffectedF; ptrIdx++) {
		CCGFace *f = effectedF[ptrIdx];
		void *co = FACE_getCenterData(f);
		VertDataZero(co, ss);

		for (i = 0; i < f->numVerts; i++) {
			VertDataAdd(co, VERT_getCo(FACE_getVerts(f)[i], curLvl), ss);
		}

		VertDataMulN(co, 1.0f / f->numVerts, ss);

		f->flags = 0;
	}

	/* edge midpoints at level 1 */
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = effectedE[ptrIdx];
		void *co = EDGE_getCo(e, nextLvl, 1);
		float sharpness = EDGE_getSharpness(e, curLvl);

		if (_edge_isBoundary(e) || sharpness >= 1.0f) {
			/* boundary or fully sharp: plain endpoint midpoint */
			VertDataCopy(co, VERT_getCo(e->v0, curLvl), ss);
			VertDataAdd(co, VERT_getCo(e->v1, curLvl), ss);
			VertDataMulN(co, 0.5f, ss);
		}
		else {
			/* smooth: endpoints + adjacent face centers, blended toward
			 * the plain midpoint by 'sharpness' */
			int numFaces = 0;
			VertDataCopy(q, VERT_getCo(e->v0, curLvl), ss);
			VertDataAdd(q, VERT_getCo(e->v1, curLvl), ss);

			for (i = 0; i < e->numFaces; i++) {
				CCGFace *f = e->faces[i];
				VertDataAdd(q, (float *)FACE_getCenterData(f), ss);
				numFaces++;
			}

			VertDataMulN(q, 1.0f / (2.0f + numFaces), ss);

			VertDataCopy(r, VERT_getCo(e->v0, curLvl), ss);
			VertDataAdd(r, VERT_getCo(e->v1, curLvl), ss);
			VertDataMulN(r, 0.5f, ss);

			VertDataCopy(co, q, ss);
			VertDataSub(r, q, ss);
			VertDataMulN(r, sharpness, ss);
			VertDataAdd(co, r, ss);
		}

		/* edge flags cleared later */
	}

	/* vertex positions at level 1 */
	for (ptrIdx = 0; ptrIdx < numEffectedV; ptrIdx++) {
		CCGVert *v = effectedV[ptrIdx];
		void *co = VERT_getCo(v, curLvl);
		void *nCo = VERT_getCo(v, nextLvl);
		int sharpCount = 0, allSharp = 1;
		float avgSharpness = 0.0;
		int seam = VERT_seam(v), seamEdges = 0;

		for (i = 0; i < v->numEdges; i++) {
			CCGEdge *e = v->edges[i];
			float sharpness = EDGE_getSharpness(e, curLvl);

			if (seam && _edge_isBoundary(e))
				seamEdges++;

			if (sharpness != 0.0f) {
				sharpCount++;
				avgSharpness += sharpness;
			}
			else {
				allSharp = 0;
			}
		}

		if (sharpCount) {
			avgSharpness /= sharpCount;
			if (avgSharpness > 1.0f) {
				avgSharpness = 1.0f;
			}
		}

		/* only treat as a seam when all edges are boundary and >= 2 */
		if (seamEdges < 2 || seamEdges != v->numEdges)
			seam = 0;

		if (!v->numEdges || ss->meshIFC.simpleSubdiv) {
			VertDataCopy(nCo, co, ss);
		}
		else if (_vert_isBoundary(v)) {
			/* boundary rule: 3/4 co + 1/4 boundary-neighbour average */
			int numBoundary = 0;

			VertDataZero(r, ss);
			for (i = 0; i < v->numEdges; i++) {
				CCGEdge *e = v->edges[i];
				if (_edge_isBoundary(e)) {
					VertDataAdd(r, VERT_getCo(_edge_getOtherVert(e, v), curLvl), ss);
					numBoundary++;
				}
			}
			VertDataCopy(nCo, co, ss);
			VertDataMulN(nCo, 0.75f, ss);
			VertDataMulN(r, 0.25f / numBoundary, ss);
			VertDataAdd(nCo, r, ss);
		}
		else {
			/* smooth rule: (face-avg + neighbour-avg + (n - 2) * co) / n */
			int numEdges = 0, numFaces = 0;

			VertDataZero(q, ss);
			for (i = 0; i < v->numFaces; i++) {
				CCGFace *f = v->faces[i];
				VertDataAdd(q, (float *)FACE_getCenterData(f), ss);
				numFaces++;
			}

			VertDataMulN(q, 1.0f / numFaces, ss);

			VertDataZero(r, ss);
			for (i = 0; i < v->numEdges; i++) {
				CCGEdge *e = v->edges[i];
				VertDataAdd(r, VERT_getCo(_edge_getOtherVert(e, v), curLvl), ss);
				numEdges++;
			}
			VertDataMulN(r, 1.0f / numEdges, ss);

			VertDataCopy(nCo, co, ss);
			VertDataMulN(nCo, numEdges - 2.0f, ss);
			VertDataAdd(nCo, q, ss);
			VertDataAdd(nCo, r, ss);
			VertDataMulN(nCo, 1.0f / numEdges, ss);
		}

		/* crease/seam blend, as in ccgSubSurf__calcSubdivLevel() */
		if (sharpCount > 1 || seam) {
			VertDataZero(q, ss);

			if (seam) {
				avgSharpness = 1.0f;
				sharpCount = seamEdges;
				allSharp = 1;
			}

			for (i = 0; i < v->numEdges; i++) {
				CCGEdge *e = v->edges[i];
				float sharpness = EDGE_getSharpness(e, curLvl);

				if (seam) {
					if (_edge_isBoundary(e)) {
						CCGVert *oV = _edge_getOtherVert(e, v);
						VertDataAdd(q, VERT_getCo(oV, curLvl), ss);
					}
				}
				else if (sharpness != 0.0f) {
					CCGVert *oV = _edge_getOtherVert(e, v);
					VertDataAdd(q, VERT_getCo(oV, curLvl), ss);
				}
			}

			VertDataMulN(q, (float) 1 / sharpCount, ss);

			if (sharpCount != 2 || allSharp) {
				/* q = q + (co - q) * avgSharpness */
				VertDataCopy(r, co, ss);
				VertDataSub(r, q, ss);
				VertDataMulN(r, avgSharpness, ss);
				VertDataAdd(q, r, ss);
			}

			/* r = co * 0.75 + q * 0.25 */
			VertDataCopy(r, co, ss);
			VertDataMulN(r, 0.75f, ss);
			VertDataMulN(q, 0.25f, ss);
			VertDataAdd(r, q, ss);

			/* nCo = nCo + (r - nCo) * avgSharpness */
			VertDataSub(r, nCo, ss);
			VertDataMulN(r, avgSharpness, ss);
			VertDataAdd(nCo, r, ss);
		}

		/* vert flags cleared later */
	}

	/* stamp the current age into the user-data of everything rebuilt */
	if (ss->useAgeCounts) {
		for (i = 0; i < numEffectedV; i++) {
			CCGVert *v = effectedV[i];
			byte *userData = ccgSubSurf_getVertUserData(ss, v);
			*((int *) &userData[ss->vertUserAgeOffset]) = ss->currentAge;
		}

		for (i = 0; i < numEffectedE; i++) {
			CCGEdge *e = effectedE[i];
			byte *userData = ccgSubSurf_getEdgeUserData(ss, e);
			*((int *) &userData[ss->edgeUserAgeOffset]) = ss->currentAge;
		}

		for (i = 0; i < numEffectedF; i++) {
			CCGFace *f = effectedF[i];
			byte *userData = ccgSubSurf_getFaceUserData(ss, f);
			*((int *) &userData[ss->faceUserAgeOffset]) = ss->currentAge;
		}
	}

	/* copy down: endpoints of level-1 edges (3 points: 0, 1, 2) */
	for (i = 0; i < numEffectedE; i++) {
		CCGEdge *e = effectedE[i];
		VertDataCopy(EDGE_getCo(e, nextLvl, 0), VERT_getCo(e->v0, nextLvl), ss);
		VertDataCopy(EDGE_getCo(e, nextLvl, 2), VERT_getCo(e->v1, nextLvl), ss);
	}

	/* copy down: fill the 2x2 level-1 face grids from center/vert/edge data */
	for (i = 0; i < numEffectedF; i++) {
		CCGFace *f = effectedF[i];

		for (S = 0; S < f->numVerts; S++) {
			CCGEdge *e = FACE_getEdges(f)[S];
			CCGEdge *prevE = FACE_getEdges(f)[(S + f->numVerts - 1) % f->numVerts];

			VertDataCopy(FACE_getIFCo(f, nextLvl, S, 0, 0), (float *)FACE_getCenterData(f), ss);
			VertDataCopy(FACE_getIECo(f, nextLvl, S, 0), (float *)FACE_getCenterData(f), ss);

			VertDataCopy(FACE_getIFCo(f, nextLvl, S, 1, 1), VERT_getCo(FACE_getVerts(f)[S], nextLvl), ss);
			VertDataCopy(FACE_getIECo(f, nextLvl, S, 1), EDGE_getCo(FACE_getEdges(f)[S], nextLvl, 1), ss);

			VertDataCopy(FACE_getIFCo(f, nextLvl, S, 1, 0), _edge_getCoVert(e, FACE_getVerts(f)[S], nextLvl, 1, vertDataSize), ss);
			VertDataCopy(FACE_getIFCo(f, nextLvl, S, 0, 1), _edge_getCoVert(prevE, FACE_getVerts(f)[S], nextLvl, 1, vertDataSize), ss);
		}
	}

	/* refine remaining levels, then normals */
	for (curLvl = 1; curLvl < subdivLevels; curLvl++)
		ccgSubSurf__calcSubdivLevel(ss,
		                            effectedV, effectedE, effectedF,
		                            numEffectedV, numEffectedE, numEffectedF, curLvl);

	if (ss->calcVertNormals)
		ccgSubSurf__calcVertNormals(ss,
		                            effectedV, effectedE, effectedF,
		                            numEffectedV, numEffectedE, numEffectedF);

	/* clear flags (face flags were already cleared in the center pass) */
	for (ptrIdx = 0; ptrIdx < numEffectedV; ptrIdx++) {
		CCGVert *v = effectedV[ptrIdx];
		v->flags = 0;
	}
	for (ptrIdx = 0; ptrIdx < numEffectedE; ptrIdx++) {
		CCGEdge *e = effectedE[ptrIdx];
		e->flags = 0;
	}

	MEM_freeN(effectedF);
	MEM_freeN(effectedE);
	MEM_freeN(effectedV);

#ifdef DUMP_RESULT_GRIDS
	ccgSubSurf__dumpCoords(ss);
#endif
}
/* ** Public API exposed to other areas which depends on old CCG code. ** */
/* Update normals for specified faces. */
/* Recompute vertex normals for the given faces.  The face list is first
 * expanded by ccgSubSurf__allFaces (freeF reports whether it allocated a
 * new list that we must free), then the neighbouring verts/edges are
 * collected so normals can be evaluated consistently across borders. */
CCGError ccgSubSurf_updateNormals(CCGSubSurf *ss, CCGFace **effectedF, int numEffectedF)
{
	CCGVert **effectedV;
	CCGEdge **effectedE;
	int numEffectedV, numEffectedE, freeF;
	int idx;

	ccgSubSurf__allFaces(ss, &effectedF, &numEffectedF, &freeF);
	ccgSubSurf__effectedFaceNeighbours(ss, effectedF, numEffectedF,
	                                   &effectedV, &numEffectedV, &effectedE, &numEffectedE);

	if (ss->calcVertNormals) {
		ccgSubSurf__calcVertNormals(ss,
		                            effectedV, effectedE, effectedF,
		                            numEffectedV, numEffectedE, numEffectedF);
	}

	/* Reset the temporary flags on every element we touched. */
	for (idx = 0; idx < numEffectedV; idx++) {
		effectedV[idx]->flags = 0;
	}
	for (idx = 0; idx < numEffectedE; idx++) {
		effectedE[idx]->flags = 0;
	}
	for (idx = 0; idx < numEffectedF; idx++) {
		effectedF[idx]->flags = 0;
	}

	MEM_freeN(effectedE);
	MEM_freeN(effectedV);
	if (freeF) {
		MEM_freeN(effectedF);
	}

	return eCCGError_None;
}
/* compute subdivision levels from a given starting point, used by
* multires subdivide/propagate, by filling in coordinates at a
* certain level, and then subdividing that up to the highest level */
/* Re-propagate subdivision data from level 'lvl' up to the finest level,
 * used after coordinates at an intermediate level have been filled in.
 * The face list is expanded by ccgSubSurf__allFaces (freeF reports
 * whether it allocated a list we must free). */
CCGError ccgSubSurf_updateLevels(CCGSubSurf *ss, int lvl, CCGFace **effectedF, int numEffectedF)
{
	CCGVert **effectedV;
	CCGEdge **effectedE;
	int numEffectedV, numEffectedE, freeF;
	int idx, level;
	const int subdivLevels = ss->subdivLevels;

	ccgSubSurf__allFaces(ss, &effectedF, &numEffectedF, &freeF);
	ccgSubSurf__effectedFaceNeighbours(ss, effectedF, numEffectedF,
	                                   &effectedV, &numEffectedV, &effectedE, &numEffectedE);

	/* Subdivide upward, one level at a time. */
	for (level = lvl; level < subdivLevels; level++) {
		ccgSubSurf__calcSubdivLevel(ss,
		                            effectedV, effectedE, effectedF,
		                            numEffectedV, numEffectedE, numEffectedF, level);
	}

	/* Reset the temporary flags on every element we touched. */
	for (idx = 0; idx < numEffectedV; idx++) {
		effectedV[idx]->flags = 0;
	}
	for (idx = 0; idx < numEffectedE; idx++) {
		effectedE[idx]->flags = 0;
	}
	for (idx = 0; idx < numEffectedF; idx++) {
		effectedF[idx]->flags = 0;
	}

	MEM_freeN(effectedE);
	MEM_freeN(effectedV);
	if (freeF) {
		MEM_freeN(effectedF);
	}

	return eCCGError_None;
}
|
omp_loop.h | // -*- C++ -*-
// Copyright (C) 2007-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
#include <parallel/base.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, etc.).
* @param __f Functor to @a process an element with __op (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already
* processed elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
         typename _Op,
         typename _Fu,
         typename _Red,
         typename _Result>
  _Op
  __for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
                                             _Op __o, _Fu& __f, _Red __r,
                                             _Result __base,
                                             _Result& __output,
      typename std::iterator_traits<_RAIter>::difference_type __bound)
  {
    typedef typename std::iterator_traits<_RAIter>::difference_type
      _DifferenceType;

    // Number of elements to process.
    _DifferenceType __length = __end - __begin;

    // Never request more threads than there are elements.
    _ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
      (__get_max_threads(), __length);

    // One partial result slot per thread; allocated inside the parallel
    // region once the actual team size is known.
    _Result *__thread_results;

#   pragma omp parallel num_threads(__num_threads)
    {
      // One thread allocates and value-initializes the shared results
      // array.  The implicit barrier at the end of the single construct
      // guarantees all threads see it before the loop below starts.
#     pragma omp single
      {
        __num_threads = omp_get_num_threads();
        __thread_results = new _Result[__num_threads];

        for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
          __thread_results[__i] = _Result();
      }

      _ThreadIndex __iam = omp_get_thread_num();

      // Each iteration processes one element with __f and folds it into
      // this thread's private partial result via the reduction functor.
#pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
      for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
        __thread_results[__iam] = __r(__thread_results[__iam],
                                      __f(__o, __begin+__pos));
    } //parallel

    // Serial reduction of the per-thread partial results into __output.
    // NOTE(review): __base and __bound are accepted but never used in
    // this implementation.
    for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
      __output = __r(__output, __thread_results[__i]);

    delete [] __thread_results;

    // Points to last element processed (needed as return value for
    // some algorithms like transform).
    __f._M_finish_iterator = __begin + __length;

    return __o;
  }
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
|
ark_brusselator1D_omp.c | /*---------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds @ SMU
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a brusselator problem from chemical
 * kinetics. This is a PDE system with 3 components, Y = [u,v,w],
* satisfying the equations,
* u_t = du*u_xx + a - (w+1)*u + v*u^2
* v_t = dv*v_xx + w*u - v*u^2
* w_t = dw*w_xx + (b-w)/ep - w*u
 * for t in [0, 10], x in [0, 1], with initial conditions
* u(0,x) = a + 0.1*sin(pi*x)
* v(0,x) = b/a + 0.1*sin(pi*x)
* w(0,x) = b + 0.1*sin(pi*x),
* and with stationary boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* v_t(t,0) = v_t(t,1) = 0,
* w_t(t,0) = w_t(t,1) = 0.
* Note: these can also be implemented as Dirichlet boundary
* conditions with values identical to the initial conditions.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with the DIRK method, using a
* Newton iteration with the band linear solver, and a
* user-supplied Jacobian routine. This example uses the OpenMP
* vector kernel, and employs OpenMP threading within the
* right-hand side and Jacobian construction functions.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h> /* access to OpenMP N_Vector */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* accessor macros between (x,v) location and 1D NVector array */
#define IDX(x,v) (3*(x)+v)
/* user data structure */
/* Note: UserData is a pointer-to-struct type; instances are heap
   allocated in main() and passed to the RHS/Jacobian routines via
   ARKStepSetUserData. */
typedef struct {
  sunindextype N;    /* number of intervals     */
  int nthreads;      /* number of OpenMP threads */
  realtype dx;       /* mesh spacing            */
  realtype a;        /* constant forcing on u   */
  realtype b;        /* steady-state value of w */
  realtype du;       /* diffusion coeff for u   */
  realtype dv;       /* diffusion coeff for v   */
  realtype dw;       /* diffusion coeff for w   */
  realtype ep;       /* stiffness parameter     */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(realtype t, N_Vector y, N_Vector fy,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/* Private helper functions */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
/* Driver: sets up the 1D brusselator problem on an OpenMP N_Vector,
   integrates with ARKStep (DIRK + band linear solver + user Jacobian),
   writes solution snapshots to disk and prints solver statistics.
   NOTE(review): the early error returns below do not free previously
   allocated objects; acceptable for an example program. */
int main(int argc, char *argv[])
{
  /* general problem parameters */
  realtype T0 = RCONST(0.0);    /* initial time */
  realtype Tf = RCONST(10.0);   /* final time */
  int Nt = 100;                 /* total number of output times */
  int Nvar = 3;                 /* number of solution fields */
  UserData udata = NULL;
  realtype *data;
  sunindextype N = 201;         /* spatial mesh size */
  realtype a = 0.6;             /* problem parameters */
  realtype b = 2.0;
  realtype du = 0.025;
  realtype dv = 0.025;
  realtype dw = 0.025;
  realtype ep = 1.0e-5;         /* stiffness parameter */
  realtype reltol = 1.0e-6;     /* tolerances */
  realtype abstol = 1.0e-10;
  sunindextype NEQ, i;

  /* general problem variables */
  int flag;                     /* reusable error-checking flag */
  N_Vector y = NULL;            /* empty vector for storing solution */
  N_Vector umask = NULL;        /* empty mask vectors for viewing solution components */
  N_Vector vmask = NULL;
  N_Vector wmask = NULL;
  SUNMatrix A = NULL;           /* empty matrix for linear solver */
  SUNLinearSolver LS = NULL;    /* empty linear solver structure */
  void *arkode_mem = NULL;      /* empty ARKode memory structure */
  realtype pi, t, dTout, tout, u, v, w;
  FILE *FID, *UFID, *VFID, *WFID;
  int iout, num_threads;
  long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;

  /* allocate udata structure */
  udata = (UserData) malloc(sizeof(*udata));
  if (check_flag((void *) udata, "malloc", 2)) return 1;

  /* set the number of threads to use */
  num_threads = 1;                       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();   /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)                          /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* store the inputs in the UserData structure */
  udata->N = N;
  udata->a = a;
  udata->b = b;
  udata->du = du;
  udata->dv = dv;
  udata->dw = dw;
  udata->ep = ep;
  udata->nthreads = num_threads;

  /* set total allocated vector length (3 fields interleaved per node) */
  NEQ = Nvar*udata->N;

  /* Initial problem output */
  printf("\n1D Brusselator PDE test problem:\n");
  printf(" N = %li, NEQ = %li\n", (long int) udata->N, (long int) NEQ);
  printf(" num_threads = %i\n", num_threads);
  printf(" problem parameters: a = %"GSYM", b = %"GSYM", ep = %"GSYM"\n",
         udata->a, udata->b, udata->ep);
  printf(" diffusion coefficients: du = %"GSYM", dv = %"GSYM", dw = %"GSYM"\n",
         udata->du, udata->dv, udata->dw);
  printf(" reltol = %.1"ESYM", abstol = %.1"ESYM"\n\n", reltol, abstol);

  /* Initialize vector data structures */
  y = N_VNew_OpenMP(NEQ, num_threads);          /* Create vector for solution */
  if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
  udata->dx = RCONST(1.0)/(N-1);                /* set spatial mesh spacing */
  data = N_VGetArrayPointer(y);                 /* Access data array for new NVector y */
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  umask = N_VNew_OpenMP(NEQ, num_threads);      /* Create vector masks */
  if (check_flag((void *)umask, "N_VNew_OpenMP", 0)) return 1;
  vmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)vmask, "N_VNew_OpenMP", 0)) return 1;
  wmask = N_VNew_OpenMP(NEQ, num_threads);
  if (check_flag((void *)wmask, "N_VNew_OpenMP", 0)) return 1;

  /* Set initial conditions into y */
  pi = RCONST(4.0)*atan(RCONST(1.0));
  for (i=0; i<N; i++) {
    data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx);    /* u */
    data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx);  /* v */
    data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx);    /* w */
  }

  /* Set mask array values for each solution component
     (each mask is 1 on its own component, 0 elsewhere; used below to
      extract per-component RMS norms via N_VWL2Norm) */
  N_VConst(0.0, umask);
  data = N_VGetArrayPointer(umask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);

  N_VConst(0.0, vmask);
  data = N_VGetArrayPointer(vmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);

  N_VConst(0.0, wmask);
  data = N_VGetArrayPointer(wmask);
  if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);

  /* Initialize matrix and linear solver data structures
     (bandwidths 4 cover the 3-variable coupling across neighbour nodes) */
  A = SUNBandMatrix(NEQ, 4, 4);
  if (check_flag((void *)A, "SUNBandMatrix", 0)) return 1;
  LS = SUNLinSol_Band(y, A);
  if (check_flag((void *)LS, "SUNLinSol_Band", 0)) return 1;

  /* Call ARKStepCreate to initialize the ARK timestepper module and
     specify the right-hand side function in y'=f(t,y), the inital time
     T0, and the initial dependent variable vector y. Note: since this
     problem is fully implicit, we set f_E to NULL and f_I to f. */
  arkode_mem = ARKStepCreate(NULL, f, T0, y);
  if (check_flag((void *)arkode_mem, "ARKStepCreate", 0)) return 1;

  /* Set routines */
  flag = ARKStepSetUserData(arkode_mem, (void *) udata);     /* Pass udata to user functions */
  if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
  flag = ARKStepSStolerances(arkode_mem, reltol, abstol);    /* Specify tolerances */
  if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;

  /* Linear solver specification */
  flag = ARKStepSetLinearSolver(arkode_mem, LS, A);          /* Attach matrix and linear solver */
  if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
  flag = ARKStepSetJacFn(arkode_mem, Jac);                   /* Set the Jacobian routine */
  if (check_flag(&flag, "ARKStepSetJacFn", 1)) return 1;

  /* output spatial mesh to disk */
  FID=fopen("bruss_mesh.txt","w");
  for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
  fclose(FID);

  /* Open output stream for results, access data arrays */
  UFID=fopen("bruss_u.txt","w");
  VFID=fopen("bruss_v.txt","w");
  WFID=fopen("bruss_w.txt","w");

  /* output initial condition to disk */
  data = N_VGetArrayPointer(y);
  if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
  for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
  for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
  for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
  fprintf(UFID,"\n");
  fprintf(VFID,"\n");
  fprintf(WFID,"\n");

  /* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
     prints results. Stops when the final time has been reached */
  t = T0;
  dTout = (Tf-T0)/Nt;
  tout = T0+dTout;
  printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
  printf(" ----------------------------------------------\n");
  for (iout=0; iout<Nt; iout++) {

    flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL);    /* call integrator */
    if (check_flag(&flag, "ARKStepEvolve", 1)) break;
    u = N_VWL2Norm(y,umask);                                      /* access/print solution statistics */
    u = sqrt(u*u/N);
    v = N_VWL2Norm(y,vmask);
    v = sqrt(v*v/N);
    w = N_VWL2Norm(y,wmask);
    w = sqrt(w*w/N);
    printf(" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM"\n", t, u, v, w);
    if (flag >= 0) {                                              /* successful solve: update output time */
      tout += dTout;
      tout = (tout > Tf) ? Tf : tout;
    } else {                                                      /* unsuccessful solve: break */
      fprintf(stderr,"Solver failure, stopping integration\n");
      break;
    }

    /* output results to disk
       NOTE(review): 'data' was fetched from y above and is reused here;
       assumes the OpenMP N_Vector's array pointer stays valid across
       ARKStepEvolve calls -- confirm against nvector_openmp docs. */
    for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
    for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
    for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
    fprintf(UFID,"\n");
    fprintf(VFID,"\n");
    fprintf(WFID,"\n");
  }
  printf(" ----------------------------------------------\n");
  fclose(UFID);
  fclose(VFID);
  fclose(WFID);

  /* Print some final statistics */
  flag = ARKStepGetNumSteps(arkode_mem, &nst);
  check_flag(&flag, "ARKStepGetNumSteps", 1);
  flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
  check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
  flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
  check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
  flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
  check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
  flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
  check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
  flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
  check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
  flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
  check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
  flag = ARKStepGetNumJacEvals(arkode_mem, &nje);
  check_flag(&flag, "ARKStepGetNumJacEvals", 1);
  flag = ARKStepGetNumLinRhsEvals(arkode_mem, &nfeLS);
  check_flag(&flag, "ARKStepGetNumLinRhsEvals", 1);

  printf("\nFinal Solver Statistics:\n");
  printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
  printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
  printf(" Total linear solver setups = %li\n", nsetups);
  printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
  printf(" Total number of Jacobian evaluations = %li\n", nje);
  printf(" Total number of Newton iterations = %li\n", nni);
  printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
  printf(" Total number of error test failures = %li\n\n", netf);

  /* Clean up and return with successful completion */
  free(udata);                  /* Free user data */
  ARKStepFree(&arkode_mem);     /* Free integrator memory */
  SUNLinSolFree(LS);            /* Free linear solver */
  SUNMatDestroy(A);             /* Free matrix */
  N_VDestroy(y);                /* Free vectors */
  N_VDestroy(umask);
  N_VDestroy(vmask);
  N_VDestroy(wmask);
  return 0;
}
/*-------------------------------
* Functions called by the solver
*-------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
  /* Computes the implicit RHS:
       u_t = du*u_xx + a - (w+1)*u + v*u^2
       v_t = dv*v_xx + w*u - v*u^2
       w_t = dw*w_xx + (b-w)/ep - w*u
     with homogeneous (stationary) boundary rows.  Returns 0 on success,
     1 on a failed array access.
     Fix: the original cleared ydot twice (before and after fetching the
     array pointers); a single clear suffices. */
  UserData udata = (UserData) user_data;    /* access problem data */
  sunindextype N = udata->N;                /* set variable shortcuts */
  realtype a  = udata->a;
  realtype b  = udata->b;
  realtype ep = udata->ep;
  realtype du = udata->du;
  realtype dv = udata->dv;
  realtype dw = udata->dw;
  realtype dx = udata->dx;
  realtype *Ydata=NULL, *dYdata=NULL;
  realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
  sunindextype i = 0;

  Ydata = N_VGetArrayPointer(y);            /* access data arrays */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
  dYdata = N_VGetArrayPointer(ydot);
  if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;
  N_VConst(0.0, ydot);                      /* initialize ydot to zero */

  /* iterate over domain, computing all equations */
  uconst = du/dx/dx;                        /* precomputed diffusion scalings */
  vconst = dv/dx/dx;
  wconst = dw/dx/dx;
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {

    /* set shortcuts */
    u = Ydata[IDX(i,0)]; ul = Ydata[IDX(i-1,0)]; ur = Ydata[IDX(i+1,0)];
    v = Ydata[IDX(i,1)]; vl = Ydata[IDX(i-1,1)]; vr = Ydata[IDX(i+1,1)];
    w = Ydata[IDX(i,2)]; wl = Ydata[IDX(i-1,2)]; wr = Ydata[IDX(i+1,2)];

    /* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
    dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;

    /* v_t = dv*v_xx + w*u - v*u^2 */
    dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;

    /* w_t = dw*w_xx + (b-w)/ep - w*u */
    dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
  }

  /* enforce stationary boundaries */
  dYdata[IDX(0,0)] = dYdata[IDX(0,1)] = dYdata[IDX(0,2)] = 0.0;
  dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;

  return 0;
}
/* Jacobian routine to compute J(t,y) = df/dy. */
static int Jac(realtype t, N_Vector y, N_Vector fy,
               SUNMatrix J, void *user_data,
               N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
  UserData udata = (UserData) user_data;   /* problem data */

  /* Build J = dL/dy + dR/dy: start from a zeroed matrix, then let the
     two helpers accumulate the diffusion and reaction contributions. */
  SUNMatZero(J);

  if (LaplaceMatrix(RCONST(1.0), J, udata) != 0) {
    printf("Jacobian calculation error in calling LaplaceMatrix!\n");
    return 1;
  }

  if (ReactionJac(RCONST(1.0), y, J, udata) != 0) {
    printf("Jacobian calculation error in calling ReactionJac!\n");
    return 1;
  }

  return 0;
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
/* Accumulates c * (Laplacian stencil) into the band matrix; entries
   already present in Jac are kept (we only add).  Boundary rows are
   left untouched, matching the stationary boundary conditions. */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;            /* shortcuts */
  realtype dx = udata->dx;
  sunindextype i = 0;
  realtype uc = c*udata->du/dx/dx;      /* scaled diffusion coefficients */
  realtype vc = c*udata->dv/dx/dx;
  realtype wc = c*udata->dw/dx/dx;

  /* fill Jacobian entries for the interior nodes only */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {
    /* left-neighbour (sub-diagonal) entries */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i-1,0)) += uc;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i-1,1)) += vc;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i-1,2)) += wc;
    /* diagonal entries */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) -= RCONST(2.0)*uc;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) -= RCONST(2.0)*vc;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) -= RCONST(2.0)*wc;
    /* right-neighbour (super-diagonal) entries */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i+1,0)) += uc;
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i+1,1)) += vc;
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i+1,2)) += wc;
  }

  return 0;
}
/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
/* Accumulates c * dR/dy (reaction-term Jacobian) into the band matrix;
   entries already present in Jac are kept (we only add).  Boundary rows
   are left untouched. */
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata)
{
  sunindextype N = udata->N;           /* shortcuts */
  realtype ep = udata->ep;
  sunindextype i = 0;
  realtype ui, vi, wi;
  realtype *Ydata = N_VGetArrayPointer(y);   /* access solution array */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;

  /* fill Jacobian entries for the interior nodes only */
#pragma omp parallel for default(shared) private(i,ui,vi,wi) schedule(static) num_threads(udata->nthreads)
  for (i=1; i<N-1; i++) {
    /* nodal values at this interior node */
    ui = Ydata[IDX(i,0)];
    vi = Ydata[IDX(i,1)];
    wi = Ydata[IDX(i,2)];

    /* derivatives with respect to u */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) += c*(RCONST(2.0)*ui*vi-(wi+RCONST(1.0)));
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,0)) += c*(wi - RCONST(2.0)*ui*vi);
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,0)) += c*(-wi);

    /* derivatives with respect to v */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,1)) += c*(ui*ui);
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) += c*(-ui*ui);

    /* derivatives with respect to w */
    SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,2)) += c*(-ui);
    SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,2)) += c*(ui);
    SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) += c*(-RCONST(1.0)/ep - ui);
  }

  return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
  /* Uniform return-value checker:
       opt == 0: SUNDIALS allocator  - error if a NULL pointer came back
       opt == 1: SUNDIALS flag value - error if *flagvalue < 0
       opt == 2: generic allocator   - error if a NULL pointer came back
     Prints a diagnostic to stderr and returns 1 on failure, 0 otherwise. */
  if (opt == 0) {
    if (flagvalue == NULL) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
  }
  else if (opt == 1) {
    const int *errflag = (const int *) flagvalue;
    if (*errflag < 0) {
      fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
              funcname, *errflag);
      return 1;
    }
  }
  else if (opt == 2) {
    if (flagvalue == NULL) {
      fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return 1;
    }
  }
  return 0;
}
/*---- end of file ----*/
|
calculate_discontinuous_distance_to_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
// Ruben Zorrilla
//
#if !defined(KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// External includes
// Project includes
#include "geometries/plane_3d.h"
#include "includes/checks.h"
#include "processes/process.h"
#include "processes/find_intersected_geometrical_objects_process.h"
namespace Kratos
{
///@addtogroup Kratos Core
///@{
///@name Kratos Classes
///@{
/// This only calculates the distance. Calculating the inside outside should be done by a derived class of this.
/** This process takes a volume model part (with tetrahedra mesh) and a skin model part (with triangle mesh) and
calculates the distance to the skin for all the elements and nodes of the volume model part.
*/
template<std::size_t TDim = 3>
class KRATOS_API(KRATOS_CORE) CalculateDiscontinuousDistanceToSkinProcess : public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of CalculateDiscontinuousDistanceToSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateDiscontinuousDistanceToSkinProcess);
///@}
///@name Life Cycle
///@{
/// Constructor to be used.
CalculateDiscontinuousDistanceToSkinProcess(
ModelPart& rVolumePart,
ModelPart& rSkinPart);
/// Destructor.
~CalculateDiscontinuousDistanceToSkinProcess() override;
///@}
///@name Deleted
///@{
/// Default constructor.
CalculateDiscontinuousDistanceToSkinProcess() = delete;
/// Copy constructor.
CalculateDiscontinuousDistanceToSkinProcess(Process const& rOther) = delete;
/// Assignment operator.
CalculateDiscontinuousDistanceToSkinProcess& operator=(CalculateDiscontinuousDistanceToSkinProcess const& rOther) = delete;
/// Copy constructor.
CalculateDiscontinuousDistanceToSkinProcess(CalculateDiscontinuousDistanceToSkinProcess const& rOther);
FindIntersectedGeometricalObjectsProcess mFindIntersectedObjectsProcess;
///@}
///@name Operations
///@{
/**
* @brief Initializes discontinuous distance computation process
* This method initializes the TO_SPLIT flag, the DISTANCE and
* ELEMENTAL_DISTANCES variables as well as the EMBEDDED_VELOCITY
*/
virtual void Initialize();
/**
* @brief Calls the FindIntersectedObjectsProcess to find the intersections
* This method calls the FindIntersectedObjectsProcess FindIntersections method.
*/
virtual void FindIntersections();
/**
* @brief Get the array containing the intersecting objects
* This method returns an array containing pointers to the intersecting geometries
* @return std::vector<PointerVector<GeometricalObject>>&
*/
virtual std::vector<PointerVector<GeometricalObject>>& GetIntersections();
/**
* @brief Computes the elemental distance values
* Given an intersecting objects vector, this method computes the elemental distance field
* @param rIntersectedObjects array containing pointers to the intersecting geometries
*/
virtual void CalculateDistances(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects);
/**
* @brief Calls the FindIntersectedObjects Clear() method
* This method calls the FindIntersectedObjects Clear() to empty the intersecting objects geometries array
*/
virtual void Clear();
/**
* @brief Executes the CalculateDiscontinuousDistanceToSkinProcess
* This method automatically does all the calls required to compute the discontinuous distance function.
*/
void Execute() override;
/**
* @brief Calculate embedded variable from skin double specialization
* This method calls the specialization method for two double variables
* @param rVariable origin double variable in the skin mesh
* @param rEmbeddedVariable elemental double variable in the volume mesh to be computed
*/
void CalculateEmbeddedVariableFromSkin(
const Variable<double> &rVariable,
const Variable<double> &rEmbeddedVariable);
/**
* @brief Calculate embedded variable from skin array specialization
* This method calls the specialization method for two double variables
* @param rVariable origin array variable in the skin mesh
* @param rEmbeddedVariable elemental array variable in the volume mesh to be computed
*/
void CalculateEmbeddedVariableFromSkin(
const Variable<array_1d<double,3>> &rVariable,
const Variable<array_1d<double,3>> &rEmbeddedVariable);
///@}
///@name Access
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override;
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override;
/// Print object's data.
void PrintData(std::ostream& rOStream) const override;
///@}
protected:
///@name Protected Operations
///@{
/**
* @brief Set the Intersection Plane object
* This method returns the plane that defines the element intersection. The 2D
* case is considered to be a simplification of the 3D one, so a "fake" extra
* point is created by extruding the first point in the z-direction.
* @param rIntPtsVector array containing the intersecting points coordinates
* @return Plane3D the plane defined by the given intersecting points coordinates
*/
Plane3D SetIntersectionPlane(const std::vector<array_1d<double,3>> &rIntPtsVector);
///@}
private:
///@name Member Variables
///@{
ModelPart& mrSkinPart;
ModelPart& mrVolumePart;
///@}
///@name Private Operations
///@{
/**
* @brief Computes the discontinuous distance in one element
* This method computes the discontinuous distance field for a given element
* @param rElement1 reference to the element of interest
* @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
*/
void CalculateElementalDistances(
Element& rElement1,
PointerVector<GeometricalObject>& rIntersectedObjects);
/**
* @brief Computes the edges intersections in one element
* Provided a list of elemental intersecting geometries, this
* method computes the edge intersections for a given element
* @param rElement1 reference to the element of interest
* @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
* @param rCutEdgesVector array that classifies the edges depending on their cut / uncut status
* @param rIntersectionPointsArray array containing the edges intersection points
* @return unsigned int number of cut edges
*/
unsigned int ComputeEdgesIntersections(
Element& rElement1,
const PointerVector<GeometricalObject>& rIntersectedObjects,
std::vector<unsigned int> &rCutEdgesVector,
std::vector<array_1d <double,3> > &rIntersectionPointsArray);
/**
* @brief Computes the intersection of a single edge
* This method computes the intersection of a given edge with the candidate
* intersecting geometry. This operation is performed accordingly to the working
* space dimension using the intersection utilities implemented in intersection_utilities.h
* @param rIntObjGeometry candidate intersecting geometry
* @param rEdgePoint1 edge origin point
* @param rEdgePoint2 edge end point
* @param rIntersectionPoint intersection point
* @return int type of intersection id (see intersection_utilities.h)
*/
int ComputeEdgeIntersection(
const Element::GeometryType& rIntObjGeometry,
const Element::NodeType& rEdgePoint1,
const Element::NodeType& rEdgePoint2,
Point& rIntersectionPoint);
/**
* @brief Computes the element intersection unit normal
* This method computes the element intersection unit normal vector using the distance function gradient.
* @param rGeometry reference to the geometry of the element of interest
* @param rElementalDistances array containing the ELEMENTAL_DISTANCES values
* @param rNormal obtained unit normal vector
*/
void ComputeIntersectionNormal(
Element::GeometryType& rGeometry,
const Vector& rElementalDistances,
array_1d<double,3> &rNormal);
/**
* @brief Computes the intersection plane approximation
* For complex intersection patterns, this method takes a list containing
* all the intersecting points and computes the plane that minimizes the
* distance from all these points in a least squares sense. The approximated
* plane is defined in terms of an origin point and its normal vector.
* @param rElement1 reference to the element of interest
* @param rPointsCoord list containing the coordinates of al the intersecting points
* @param rPlaneBasePointCoords base point defining the approximated plane
* @param rPlaneNormal normal vector defining the approximated plane
*/
void ComputePlaneApproximation(
const Element& rElement1,
const std::vector< array_1d<double,3> >& rPointsCoord,
array_1d<double,3>& rPlaneBasePointCoords,
array_1d<double,3>& rPlaneNormal);
/**
* @brief Checks (and corrects if needed) the intersection normal orientation
* This method checks the orientation of the previously computed intersection normal.
* To do that, the normal vector to each one of the intersecting geometries is
* computed and its directo is compared against the current one. If the negative
* votes win, the current normal vector orientation is switched.
* @param rGeometry element of interest geometry
* @param rIntersectedObjects reference to the array containing the element of interest intersecting geometries
* @param rElementalDistances array containing the ELEMENTAL_DISTANCES values
*/
void CorrectDistanceOrientation(
Element::GeometryType& rGeometry,
const PointerVector<GeometricalObject>& rIntersectedObjects,
Vector& rElementalDistances);
/**
* @brief Computes the normal vector to an intersecting object geometry
* This method computes the normal vector to an intersecting object geometry.
* @param rGeometry reference to the geometry of the intersecting object
* @param rIntObjNormal reference to the intersecting object normal vector
*/
void inline ComputeIntersectionNormalFromGeometry(
const Element::GeometryType &rGeometry,
array_1d<double,3> &rIntObjNormal);
/**
* @brief Computes the value of any embedded variable
* For a given array variable in the skin mesh, this method calculates the value
* of such variable in the embedded mesh. This is done in each element of the volume
* mesh by computing the average value of all the edges intersections. This value
* is averaged again according to the number of intersected edges.
* @tparam TVarType variable type
* @param rVariable origin variable in the skin mesh
* @param rEmbeddedVariable elemental variable in the volume mesh to be computed
*/
/**
 * @brief Maps a nodal skin variable onto the intersected volume elements
 * For each volume element that has intersection candidates, the variable is
 * interpolated (via shape functions) at every edge/skin intersection point.
 * Each cut edge contributes the average over the geometries cutting it, and
 * the elemental value is the average over all cut edges. Elements without
 * intersections keep the zero value set in the initialization loop.
 * @tparam TVarType variable type
 * @param rVariable origin variable in the skin mesh (read from nodal historical data)
 * @param rEmbeddedVariable destination elemental (non-historical) variable in the volume mesh
 */
template<class TVarType>
void CalculateEmbeddedVariableFromSkinSpecialization(
    const Variable<TVarType> &rVariable,
    const Variable<TVarType> &rEmbeddedVariable)
{
    const auto &r_int_obj_vect= this->GetIntersections();
    const int n_elems = mrVolumePart.NumberOfElements();

    // Check requested variables
    KRATOS_ERROR_IF(rEmbeddedVariable.Key() == 0)
        << rEmbeddedVariable << " key is 0. Check that the variable is correctly registered." << std::endl;
    KRATOS_ERROR_IF((mrSkinPart.NodesBegin())->SolutionStepsDataHas(rVariable) == false)
        << "Skin model part solution step data missing variable: " << rVariable << std::endl;

    // Initialize embedded variable value (zero on every element, including uncut ones)
    #pragma omp parallel for
    for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
        auto it_elem = mrVolumePart.ElementsBegin() + i_elem;
        it_elem->SetValue(rEmbeddedVariable, rEmbeddedVariable.Zero());
    }

    // Compute the embedded variable value for each element
    // (dynamic schedule: work per element varies with its number of candidates)
    #pragma omp parallel for schedule(dynamic)
    for (int i_elem = 0; i_elem < n_elems; ++i_elem) {
        // Check if the current element has intersecting entities
        if (r_int_obj_vect[i_elem].size() != 0) {
            // Initialize the element values
            unsigned int n_int_edges = 0;
            auto it_elem = mrVolumePart.ElementsBegin() + i_elem;
            auto &r_geom = it_elem->GetGeometry();
            const auto edges = r_geom.Edges();

            // Loop the element of interest edges
            for (unsigned int i_edge = 0; i_edge < r_geom.EdgesNumber(); ++i_edge) {
                // Initialize edge values
                unsigned int n_int_obj = 0;
                TVarType i_edge_val = rEmbeddedVariable.Zero();

                // Check the edge intersection against all the candidates
                for (auto &r_int_obj : r_int_obj_vect[i_elem]) {
                    Point intersection_point;
                    const int is_intersected = this->ComputeEdgeIntersection(
                        r_int_obj.GetGeometry(),
                        edges[i_edge][0],
                        edges[i_edge][1],
                        intersection_point);

                    // Compute the variable value in the intersection point
                    // (shape-function interpolation inside the intersecting geometry)
                    if (is_intersected == 1) {
                        n_int_obj++;
                        array_1d<double,3> local_coords;
                        r_int_obj.GetGeometry().PointLocalCoordinates(local_coords, intersection_point);
                        Vector int_obj_N;
                        r_int_obj.GetGeometry().ShapeFunctionsValues(int_obj_N, local_coords);
                        for (unsigned int i_node = 0; i_node < r_int_obj.GetGeometry().PointsNumber(); ++i_node) {
                            i_edge_val += r_int_obj.GetGeometry()[i_node].FastGetSolutionStepValue(rVariable) * int_obj_N[i_node];
                        }
                    }
                }

                // Check if the edge is intersected
                if (n_int_obj != 0) {
                    // Update the element intersected edges counter
                    n_int_edges++;

                    // Add the average edge value (there might exist cases in where
                    // more than one geometry intersects the edge of interest).
                    it_elem->GetValue(rEmbeddedVariable) += i_edge_val / n_int_obj;
                }
            }

            // Average between all the intersected edges
            if (n_int_edges != 0) {
                it_elem->GetValue(rEmbeddedVariable) /= n_int_edges;
            }
        }
    }
};
///@}
}; // Class CalculateDiscontinuousDistanceToSkinProcess
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (
std::istream& rIStream,
CalculateDiscontinuousDistanceToSkinProcess<>& rThis);
/// output stream function: prints the process header followed by its data
inline std::ostream& operator << (
    std::ostream& rOStream,
    const CalculateDiscontinuousDistanceToSkinProcess<>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_CALCULATE_DISCONTINUOUS_DISTANCE_TO_SKIN_PROCESS_H_INCLUDED defined
|
8091.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: fill x with multiples of pi and A with the
   deterministic pattern A[r][c] = r*(c+1)/nx used by the benchmark. */
static
void init_array (int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int r, c;

  for (c = 0; c < ny; c++)
    x[c] = c * M_PI;

  for (r = 0; r < nx; r++)
    for (c = 0; c < ny; c++)
      A[r][c] = ((DATA_TYPE) r*(c+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   A newline is emitted before every 20th value (including the first). */
static
void print_array(int nx,
		 DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
  int idx;

  for (idx = 0; idx < nx; idx++)
    {
      fprintf (stderr, DATA_PRINTF_MODIFIER, y[idx]);
      if ((idx % 20) == 0)
	fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: tmp = A*x, then y = A^T*tmp.  The whole
   function will be timed, including the call and return.

   Fix: the original wrapped two `#pragma omp parallel for` constructs inside
   an enclosing `#pragma omp parallel` region.  With that nesting, EVERY
   thread of the outer team executed both loops in full (each spawning its
   own one-thread inner team), so the updates of tmp[] and y[] raced and the
   work was duplicated whenever more than one thread ran.  The region below
   uses orphaned `omp for` work-sharing instead, and the y accumulation is
   parallelized over j (keeping the i loop innermost and ascending, so the
   per-element floating-point summation order is unchanged). */
static
void kernel_atax(int nx, int ny,
		 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
		 DATA_TYPE POLYBENCH_1D(x,NY,ny),
		 DATA_TYPE POLYBENCH_1D(y,NY,ny),
		 DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
  int i, j;

#pragma scop
  #pragma omp parallel private(i, j)
  {
    /* y := 0 */
    #pragma omp for schedule(static, 16)
    for (i = 0; i < _PB_NY; i++)
      y[i] = 0;

    /* tmp := A * x  (each i owned by exactly one thread) */
    #pragma omp for schedule(static, 16)
    for (i = 0; i < _PB_NX; i++)
      {
	tmp[i] = 0;
	for (j = 0; j < _PB_NY; j++)
	  tmp[i] = tmp[i] + A[i][j] * x[j];
      }

    /* y := A^T * tmp  (each j owned by exactly one thread: no races) */
    #pragma omp for schedule(static, 16)
    for (j = 0; j < _PB_NY; j++)
      for (i = 0; i < _PB_NX; i++)
	y[j] = y[j] + A[i][j] * tmp[i];
  }
#pragma endscop
}
/* Driver: allocate, initialize, time the kernel, print live-out data. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int nx = NX;
  int ny = NY;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);

  /* Initialize array(s). */
  init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));

  /* Start timer. */
  polybench_start_instruments;

  /* Run kernel. */
  kernel_atax (nx, ny,
	       POLYBENCH_ARRAY(A),
	       POLYBENCH_ARRAY(x),
	       POLYBENCH_ARRAY(y),
	       POLYBENCH_ARRAY(tmp));

  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);

  return 0;
}
|
convolution_pack1to4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Generic (non-unrolled) convolution, 1-element input pack -> 4-element
// output pack, using MIPS MSA vectors.  top_blob is assumed pre-sized by the
// caller; the kptr arithmetic implies weights laid out as
// [outch][channels][maxk][4] -- TODO confirm against the packing code.
static void convolution_pack1to4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack1ton, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat element offsets of each kernel tap relative to the
    // window origin, accounting for dilation and the input row width
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next tap row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulator starts at zero, or at this output group's bias
                v4f32 _sum = (v4f32)__msa_fill_w(0);

                if (bias_data_ptr)
                {
                    _sum = (v4f32)__msa_ld_w(bias_data_ptr + p * 4, 0);
                }

                const float* kptr = (const float*)weight_data_pack1ton + maxk * channels * p * 4;

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        // broadcast the scalar input sample, fused
                        // multiply-add against 4 packed weights
                        v4f32 _val = __msa_fill_w_f32(sptr[space_ofs[k]]);
                        v4f32 _w = (v4f32)__msa_ld_w(kptr, 0);
                        _sum = __msa_fmadd_w(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __msa_st_w((v4i32)_sum, outptr + j * 4, 0);
            }

            outptr += outw * 4;
        }
    }
}
|
soxr.c | /* SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net
* Licence for this file: LGPL v2.1 See LICENCE for details. */
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "soxr.h"
#include "data-io.h"
#include "internal.h"
/* Returns the library identification string, e.g. "libsoxr-<version>". */
char const * soxr_version(void)
{
  return "libsoxr-" SOXR_THIS_VERSION_STR;
}
typedef void sample_t; /* float or double */
typedef void (* fn_t)(void);
typedef fn_t control_block_t[10];
#define resampler_input (*(sample_t * (*)(void *, sample_t * samples, size_t n))p->control_block[0])
#define resampler_process (*(void (*)(void *, size_t))p->control_block[1])
#define resampler_output (*(sample_t const * (*)(void *, sample_t * samples, size_t * n))p->control_block[2])
#define resampler_flush (*(void (*)(void *))p->control_block[3])
#define resampler_close (*(void (*)(void *))p->control_block[4])
#define resampler_delay (*(double (*)(void *))p->control_block[5])
#define resampler_sizes (*(void (*)(size_t * shared, size_t * channel))p->control_block[6])
#define resampler_create (*(char const * (*)(void * channel, void * shared, double io_ratio, soxr_quality_spec_t * q_spec, soxr_runtime_spec_t * r_spec, double scale))p->control_block[7])
#define resampler_set_io_ratio (*(void (*)(void *, double io_ratio, size_t len))p->control_block[8])
#define resampler_id (*(char const * (*)(void))p->control_block[9])
typedef void * resampler_t; /* For one channel. */
typedef void * resampler_shared_t; /* Between channels. */
typedef void (* deinterleave_t)(sample_t * * dest,
soxr_datatype_t data_type, void const * * src0, size_t n, unsigned ch);
typedef size_t (* interleave_t)(soxr_datatype_t data_type, void * * dest,
sample_t const * const * src, size_t, unsigned, unsigned long *);
struct soxr {
  unsigned num_channels;           /* Number of audio channels. */
  double io_ratio;                 /* input_rate / output_rate. */
  soxr_error_t error;              /* Sticky error string; 0 = no error. */
  soxr_quality_spec_t q_spec;
  soxr_io_spec_t io_spec;
  soxr_runtime_spec_t runtime_spec;

  void * input_fn_state;           /* Opaque state handed back to input_fn. */
  soxr_input_fn_t input_fn;        /* Optional pull-mode input callback. */
  size_t max_ilen;                 /* Max frames requested per callback. */

  resampler_shared_t shared;       /* State shared between channels. */
  resampler_t * resamplers;        /* One engine instance per channel. */
  control_block_t control_block;   /* Engine vtable (see resampler_* macros). */
  deinterleave_t deinterleave;     /* Interleaved user data -> channel buffers. */
  interleave_t interleave;         /* Channel buffers -> interleaved user data. */
  void * * channel_ptrs;           /* Scratch per-channel buffer pointers. */
  size_t clips;                    /* Running count of clipped samples. */
  unsigned long seed;              /* Dither PRNG seed. */
  int flushing;                    /* Set once end-of-input was signalled. */
};
/* TODO: these should not be here. */
#define TO_3dB(a) ((1.6e-6*a-7.5e-4)*a+.646)
#define LOW_Q_BW0 (1385 / 2048.) /* 0.67625 rounded to be a FP exact. */
/* Builds a quality spec from a `recipe' (quality level in the low nibble,
   phase-response selector in bits 4..5, plus flag bits) and extra `flags'.
   Fix: the phase bits (0x00 linear, 0x10 intermediate, 0x30 minimum) must be
   shifted right by 4 to index the lookup string; the previous `>> 8' always
   produced index 0, so every recipe decoded as linear phase. */
soxr_quality_spec_t soxr_quality_spec(unsigned long recipe, unsigned long flags)
{
  soxr_quality_spec_t spec, * p = &spec;
  unsigned quality = recipe & 0xf;   /* Quality level 0..13. */
  double rej;                        /* Stop-band rejection, in dB. */
  memset(p, 0, sizeof(*p));
  if (quality > 13) {
    p->e = "invalid quality type";
    return spec;
  }
  if (quality == 13)
    quality = 6;
  else if (quality > 10)
    quality = 0;
  /* Phase response percentage: '\62'=50 linear, '\31'=25 intermediate,
     '\144'=100 maximum, and the string's NUL (index 3) = 0 minimum. */
  p->phase_response = "\62\31\144"[(recipe & 0x30)>>4];
  p->stopband_begin = 1;
  p->precision = !quality? 0: quality < 3? 16 : quality < 8? 4 + quality * 4 : 55 - quality * 4;
  rej = p->precision * linear_to_dB(2.);
  p->flags = flags;
  if (quality < 8) {
    p->passband_end = quality == 1? LOW_Q_BW0 : 1 - .05 / TO_3dB(rej);
    if (quality <= 2)
      p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
  }
  else {
    static float const bw[] = {.931f, .832f, .663f};
    p->passband_end = bw[quality - 8];
    if (quality - 8 == 2)
      p->flags &= ~SOXR_ROLLOFF_NONE, p->flags |= SOXR_ROLLOFF_MEDIUM;
  }
  if (recipe & SOXR_STEEP_FILTER)
    p->passband_end = 1 - .01 / TO_3dB(rej);
  return spec;
}
/* Returns a short id string naming the engine selected for this instance
   (dispatched through p's control block via the resampler_id macro). */
char const * soxr_engine(soxr_t p)
{
  return resampler_id();
}
/* Exposes the (writable) running clip counter of the instance. */
size_t * soxr_num_clips(soxr_t p)
{
  return &p->clips;
}
/* Returns the instance's sticky error (0 when no error has occurred). */
soxr_error_t soxr_error(soxr_t p)
{
  return p->error;
}
/* Returns a runtime spec populated with the library's default tuning
   parameters and the requested thread count (all other fields zeroed). */
soxr_runtime_spec_t soxr_runtime_spec(unsigned num_threads)
{
  soxr_runtime_spec_t spec;

  memset(&spec, 0, sizeof(spec));
  spec.log2_min_dft_size = 10;
  spec.log2_large_dft_size = 17;
  spec.coef_size_kbytes = 400;
  spec.num_threads = num_threads;
  return spec;
}
/* Returns an io spec for the given sample datatypes with unity scaling;
   an out-of-range datatype is reported through the spec's `e' field. */
soxr_io_spec_t soxr_io_spec(
  soxr_datatype_t itype,
  soxr_datatype_t otype)
{
  soxr_io_spec_t spec;

  memset(&spec, 0, sizeof(spec));
  if ((itype | otype) >= SOXR_SPLIT * 2) {
    spec.e = "invalid io datatype(s)";
  }
  else {
    spec.itype = itype;
    spec.otype = otype;
    spec.scale = 1;
  }
  return spec;
}
#if HAVE_SIMD
/* Runtime detection of SSE/SSE2 on x86.  On x86-64 the answer is always
   yes; on 32-bit x86 it queries CPUID leaf 1. */
static bool cpu_has_simd(void)
{
#if defined __x86_64__ || defined _M_X64
  return true;                  /* SSE2 is architectural on x86-64. */
#elif defined __GNUC__ && defined i386
  uint32_t eax, ebx, ecx, edx;
  /* ebx is saved/restored by hand since it may serve as the PIC register. */
  __asm__ __volatile__ (
      "pushl %%ebx \n\t"
      "cpuid \n\t"
      "movl %%ebx, %1\n\t"
      "popl %%ebx \n\t"
      : "=a"(eax), "=r"(ebx), "=c"(ecx), "=d"(edx)
      : "a"(1)
      : "cc" );
  return !!(edx & 0x06000000);  /* EDX bits 25|26: SSE and SSE2. */
#elif defined _MSC_VER && defined _M_IX86
  uint32_t d;
  __asm {
    xor eax, eax
    inc eax
    push ebx
    cpuid
    pop ebx
    mov d, edx
  }
  return !!(d & 0x06000000);
#endif
  return false;                 /* Unknown 32-bit target: assume no SIMD. */
}
#endif
extern control_block_t _soxr_rate32s_cb, _soxr_rate32_cb, _soxr_rate64_cb, _soxr_vr32_cb;
/* Creates a resampler instance.  Any of the spec pointers may be NULL, in
   which case library defaults are used.  Selects the single- or
   double-precision engine (and the SIMD variant where available) from the
   requested precision/flags.  On failure, returns NULL and stores the error
   in *error0 (if given). */
soxr_t soxr_create(
  double input_rate, double output_rate,
  unsigned num_channels,
  soxr_error_t * error0,
  soxr_io_spec_t const * io_spec,
  soxr_quality_spec_t const * q_spec,
  soxr_runtime_spec_t const * runtime_spec)
{
  /* 0 = both rates unset (defer), -1 = only one set (invalid later). */
  double io_ratio = output_rate? input_rate? input_rate / output_rate : -1 : input_rate? -1 : 0;
  /* Full-scale value per datatype group: float32, float64, int32, int16. */
  static const float datatype_full_scale[] = {1, 1, 65536.*32768, 32768};
  soxr_t p = 0;
  soxr_error_t error = 0;

  /* Propagate errors already latched inside the supplied specs. */
  if (q_spec && q_spec->e)  error = q_spec->e;
  else if (io_spec && (io_spec->itype | io_spec->otype) >= SOXR_SPLIT * 2)
    error = "invalid io datatype(s)";

  if (!error && !(p = calloc(sizeof(*p), 1))) error = "malloc failed";

  if (p) {
    p->q_spec = q_spec? *q_spec : soxr_quality_spec(SOXR_HQ, 0);

    if (q_spec) { /* Backwards compatibility with original API: */
      /* Old API expressed band edges as percentages; rescale to 0..2. */
      if (p->q_spec.passband_end > 2)
        p->q_spec.passband_end /= 100;
      if (p->q_spec.stopband_begin > 2)
        p->q_spec.stopband_begin = 2 - p->q_spec.stopband_begin / 100;
    }

    p->io_ratio = io_ratio;
    p->num_channels = num_channels;
    if (io_spec)
      p->io_spec = *io_spec;
    else p->io_spec.scale = 1;

    p->runtime_spec = runtime_spec? *runtime_spec : soxr_runtime_spec(1);

    /* Compensate for the differing full-scale of input/output datatypes. */
    p->io_spec.scale *= datatype_full_scale[p->io_spec.otype & 3] /
                        datatype_full_scale[p->io_spec.itype & 3];
    /* Seed the dither PRNG from the clock and the instance address. */
    p->seed = (unsigned long)time(0) ^ (unsigned long)(size_t)p;

#if HAVE_SINGLE_PRECISION
    /* Single precision engine: low-precision requests or variable-rate. */
    if (!HAVE_DOUBLE_PRECISION || (p->q_spec.precision <= 20 && !(p->q_spec.flags & SOXR_DOUBLE_PRECISION))
        || (p->q_spec.flags & SOXR_VR)) {
      p->deinterleave = (deinterleave_t)_soxr_deinterleave_f;
      p->interleave = (interleave_t)_soxr_interleave_f;
      memcpy(&p->control_block,
          (p->q_spec.flags & SOXR_VR)? &_soxr_vr32_cb :
#if HAVE_SIMD
          cpu_has_simd()? &_soxr_rate32s_cb :
#endif
          &_soxr_rate32_cb, sizeof(p->control_block));
    }
#if HAVE_DOUBLE_PRECISION
    else
#endif
#endif
#if HAVE_DOUBLE_PRECISION
    { /* Double precision engine. */
      p->deinterleave = (deinterleave_t)_soxr_deinterleave;
      p->interleave = (interleave_t)_soxr_interleave;
      memcpy(&p->control_block, &_soxr_rate64_cb, sizeof(p->control_block));
    }
#endif

    /* If everything needed is known already, initialise eagerly. */
    if (p->num_channels && io_ratio)
      error = soxr_set_io_ratio(p, io_ratio, 0);
  }
  if (error)
    soxr_delete(p), p = 0;
  if (error0)
    *error0 = error;
  return p;
}
/* Registers a pull-mode input callback (used by soxr_output); a zero
   max_ilen means "no per-call limit".  Always succeeds. */
soxr_error_t soxr_set_input_fn(soxr_t p,
    soxr_input_fn_t input_fn, void * input_fn_state, size_t max_ilen)
{
  p->input_fn = input_fn;
  p->input_fn_state = input_fn_state;
  if (max_ilen)
    p->max_ilen = max_ilen;
  else
    p->max_ilen = (size_t)-1;
  return 0;
}
/* Frees everything owned by *p (but not p itself) and zeroes the struct. */
static void soxr_delete0(soxr_t p)
{
  unsigned i;

  if (p->resamplers) for (i = 0; i < p->num_channels; ++i) {
    if (p->resamplers[i])
      resampler_close(p->resamplers[i]);   /* Engine-specific teardown. */
    free(p->resamplers[i]);
  }
  free(p->resamplers);
  free(p->channel_ptrs);
  free(p->shared);

  memset(p, 0, sizeof(*p));
}
/* Reports the current resampling latency of channel 0; 0 when the instance
   is missing, errored, or not yet initialised. */
double soxr_delay(soxr_t p)
{
  if (!p || p->error || !p->resamplers)
    return 0;
  return resampler_delay(p->resamplers[0]);
}
/* Tears down p's internals and records (and returns) the given error.
   Note: soxr_delete0 zeroes *p, so the assignment must follow it. */
static soxr_error_t fatal_error(soxr_t p, soxr_error_t error)
{
  soxr_delete0(p);
  return p->error = error;
}
/* Allocates the shared state plus one resampler per channel and creates the
   engines with the current I/O ratio; any failure tears the whole instance
   down via fatal_error. */
static soxr_error_t initialise(soxr_t p)
{
  unsigned i;
  size_t shared_size, channel_size;

  resampler_sizes(&shared_size, &channel_size);   /* Engine-reported sizes. */
  p->channel_ptrs = calloc(sizeof(*p->channel_ptrs), p->num_channels);
  p->shared = calloc(shared_size, 1);
  p->resamplers = calloc(sizeof(*p->resamplers), p->num_channels);
  if (!p->shared || !p->channel_ptrs || !p->resamplers)
    return fatal_error(p, "malloc failed");

  for (i = 0; i < p->num_channels; ++i) {
    soxr_error_t error;
    if (!(p->resamplers[i] = calloc(channel_size, 1)))
      return fatal_error(p, "malloc failed");
    error = resampler_create(
        p->resamplers[i],
        p->shared,
        p->io_ratio,
        &p->q_spec,
        &p->runtime_spec,
        p->io_spec.scale);
    if (error)
      return fatal_error(p, error);
  }
  return 0;
}
/* Sets the channel count before the engines exist; once resamplers have been
   created the count is frozen.  Setting the same count is a no-op that just
   reports the current error state. */
soxr_error_t soxr_set_num_channels(soxr_t p, unsigned num_channels)
{
  if (!p)
    return "invalid soxr_t pointer";
  if (num_channels == p->num_channels)
    return p->error;
  if (!num_channels)
    return "invalid # of channels";
  if (p->resamplers)
    return "# of channels can't be changed";

  p->num_channels = num_channels;
  /* Re-apply the ratio so initialisation can proceed with the new count. */
  return soxr_set_io_ratio(p, p->io_ratio, 0);
}
/* Sets (or slews to) a new I/O ratio.  The first call triggers full
   initialisation; afterwards a changed ratio is only accepted when the
   selected engine supports variable rate (control_block slot 8 non-null). */
soxr_error_t soxr_set_io_ratio(soxr_t p, double io_ratio, size_t slew_len)
{
  unsigned i;
  soxr_error_t error;
  if (!p) return "invalid soxr_t pointer";
  if ((error = p->error)) return error;
  if (!p->num_channels) return "must set # channels before O/I ratio";
  if (io_ratio <= 0) return "I/O ratio out-of-range";
  if (!p->channel_ptrs) {           /* Not yet initialised: do it now. */
    p->io_ratio = io_ratio;
    return initialise(p);
  }
  if (p->control_block[8]) {        /* Engine supports variable rate. */
    for (i = 0; !error && i < p->num_channels; ++i)
      resampler_set_io_ratio(p->resamplers[i], io_ratio, slew_len);
    return error;
  }
  /* Fixed-rate engine: only a (numerically) identical ratio is allowed. */
  return fabs(p->io_ratio - io_ratio) < 1e-15? 0 :
    "Varying O/I ratio is not supported with this quality level";
}
/* Destroys an instance created by soxr_create; NULL is tolerated. */
void soxr_delete(soxr_t p)
{
  if (!p)
    return;
  soxr_delete0(p);
  free(p);
}
/* Resets the stream state so the instance can be reused: saves the
   user-supplied configuration, tears everything down, then restores the
   configuration (the engines are re-created lazily on the next ratio set). */
soxr_error_t soxr_clear(soxr_t p) /* TODO: this, properly. */
{
  if (p) {
    struct soxr tmp = *p;          /* Keep a copy of the configuration. */
    soxr_delete0(p);
    memset(p, 0, sizeof(*p));
    p->input_fn = tmp.input_fn;
    p->runtime_spec = tmp.runtime_spec;
    p->q_spec = tmp.q_spec;
    p->io_spec = tmp.io_spec;
    p->num_channels = tmp.num_channels;
    p->input_fn_state = tmp.input_fn_state;
    memcpy(p->control_block, tmp.control_block, sizeof(p->control_block));
    p->deinterleave = tmp.deinterleave;
    p->interleave = tmp.interleave;
    return 0;
  }
  return "invalid soxr_t pointer";
}
/* Converts/copies len frames of one channel into engine i's input buffer. */
static void soxr_input_1ch(soxr_t p, unsigned i, soxr_cbuf_t src, size_t len)
{
  sample_t * dest = resampler_input(p->resamplers[i], NULL, len);
  (*p->deinterleave)(&dest, p->io_spec.itype, &src, len, 1);
}
/* Feeds len frames of interleaved or split input to the per-channel engines;
   len == 0 signals end-of-input (flushing mode).  Returns frames accepted
   (0 on error).
   Fix: `p' was dereferenced to compute `separated' before the NULL/error
   guard; the access now happens only after p has been validated. */
static size_t soxr_input(soxr_t p, void const * in, size_t len)
{
  bool separated;
  unsigned i;
  if (!p || p->error) return 0;
  if (!in && len) {p->error = "null input buffer pointer"; return 0;}
  if (!len) {
    p->flushing = true;            /* Zero-length input marks end-of-stream. */
    return 0;
  }
  separated = !!(p->io_spec.itype & SOXR_SPLIT);
  if (separated)                   /* One buffer per channel: */
    for (i = 0; i < p->num_channels; ++i)
      soxr_input_1ch(p, i, ((soxr_cbufs_t)in)[i], len);
  else {                           /* Interleaved: split into engine buffers. */
    for (i = 0; i < p->num_channels; ++i)
      p->channel_ptrs[i] = resampler_input(p->resamplers[i], NULL, len);
    (*p->deinterleave)(
        (sample_t **)p->channel_ptrs, p->io_spec.itype, &in, len, p->num_channels);
  }
  return len;
}
/* Runs one channel's engine: optionally flushes, processes up to len frames,
   then either interleaves the result straight into dest (split output) or
   parks the engine's output pointer in channel_ptrs for a later combined
   interleave.  Returns the number of frames produced. */
static size_t soxr_output_1ch(soxr_t p, unsigned i, soxr_buf_t dest, size_t len, bool separated)
{
  sample_t const * src;
  if (p->flushing)
    resampler_flush(p->resamplers[i]);
  resampler_process(p->resamplers[i], len);
  src = resampler_output(p->resamplers[i], NULL, &len);   /* len is updated. */
  if (separated)
    p->clips += (p->interleave)(p->io_spec.otype, &dest, &src,
      len, 1, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  else p->channel_ptrs[i] = (void /* const */ *)src;
  return len;
}
/* Produces up to len frames per channel into `out' (split or interleaved),
   optionally fanning the channels out across OpenMP threads when the
   runtime spec requests automatic threading. */
static size_t soxr_output_no_callback(soxr_t p, soxr_buf_t out, size_t len)
{
  unsigned u;
  size_t done = 0;
  bool separated = !!(p->io_spec.otype & SOXR_SPLIT);
#if defined _OPENMP
  int i;
  if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
  for (i = 0; i < (int)p->num_channels; ++i) {
    size_t done1;
    done1 = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], len, separated);
    if (!i)
      done = done1;    /* All channels yield the same count. */
  } else
#endif
  for (u = 0; u < p->num_channels; ++u)
    done = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], len, separated);

  if (!separated)
    /* Interleave the per-channel pointers gathered in channel_ptrs. */
    p->clips += (p->interleave)(p->io_spec.otype, &out, (sample_t const * const *)p->channel_ptrs,
        done, p->num_channels, (p->io_spec.flags & SOXR_NO_DITHER)? 0 : &p->seed);
  return done;
}
/* Pull-mode output: produces up to len0 frames into `out', pulling more
   input through the registered input_fn as needed.  Returns the number of
   frames produced.
   Fix: `p' was dereferenced (p->max_ilen, p->io_ratio) before the NULL
   guard; the computation of ilen is moved after the validity checks.
   `idone' is additionally zero-initialised as defensive hardening of the
   loop condition. */
size_t soxr_output(soxr_t p, void * out, size_t len0)
{
  size_t odone, odone0 = 0, olen = len0, osize, idone = 0;
  size_t ilen;
  void const * in = out; /* Set to !=0, so that caller may leave unset. */
  bool was_flushing;

  if (!p || p->error) return 0;
  if (!out && len0) {p->error = "null output buffer pointer"; return 0;}
  ilen = min(p->max_ilen, (size_t)ceil((double)olen *p->io_ratio));

  do {
    odone = soxr_output_no_callback(p, out, olen);
    odone0 += odone;
    if (odone0 == len0 || !p->input_fn || p->flushing)
      break;

    /* Advance the output pointer and request more input from the client. */
    osize = soxr_datatype_size(p->io_spec.otype) * p->num_channels;
    out = (char *)out + osize * odone;
    olen -= odone;
    idone = p->input_fn(p->input_fn_state, &in, ilen);
    was_flushing = p->flushing;
    if (!in)
      p->error = "input function reported failure";
    else soxr_input(p, in, idone);
  } while (odone || idone || (!was_flushing && p->flushing));
  return odone0;
}
/* Estimates how many input frames are needed to yield olen output frames,
   capped by the caller-supplied ilen. */
static size_t soxr_i_for_o(soxr_t p, size_t olen, size_t ilen)
{
  size_t needed;
#if 0
  if (p->runtime_spec.flags & SOXR_STRICT_BUFFERING)
    needed = rate_i_for_o(p->resamplers[0], olen);
  else
#endif
  needed = (size_t)ceil((double)olen * p->io_ratio);
  return min(needed, ilen);
}
#if 0
static size_t soxr_o_for_i(soxr_t p, size_t ilen, size_t olen)
{
size_t result = (size_t)ceil((double)ilen / p->io_ratio);
return min(result, olen);
}
#endif
/* General-purpose processing: supplies up to ilen0 input frames and/or
   drains up to olen output frames in one call.  A NULL `in' (or a bitwise-
   complemented ilen0) requests flushing of the internal latency.  The actual
   counts consumed/produced are returned via idone0/odone0. */
soxr_error_t soxr_process(soxr_t p,
    void const * in , size_t ilen0, size_t * idone0,
    void * out, size_t olen , size_t * odone0)
{
  size_t ilen, idone, odone = 0;
  unsigned u;
  bool flush_requested = false;

  if (!p) return "null pointer";

  if (!in)
    flush_requested = true, ilen = ilen0 = 0;
  else {
    if ((ptrdiff_t)ilen0 < 0)   /* ~len encodes `process then flush'. */
      flush_requested = true, ilen0 = ~ilen0;
    if (idone0 && (1 || flush_requested))
      ilen = soxr_i_for_o(p, olen, ilen0);   /* Don't over-consume input. */
    else
      ilen = ilen0/*, olen = soxr_o_for_i(p, ilen, olen)*/;
  }
  /* Enter flushing mode only once all offered input will be consumed. */
  p->flushing |= ilen == ilen0 && flush_requested;

  if (!out && !in)
    idone = ilen;
  else if (p->io_spec.itype & p->io_spec.otype & SOXR_SPLIT) { /* Both i & o */
#if defined _OPENMP
    int i;
    if (!p->runtime_spec.num_threads && p->num_channels > 1)
#pragma omp parallel for
    for (i = 0; i < (int)p->num_channels; ++i) {
      size_t done;
      if (in)
        soxr_input_1ch(p, (unsigned)i, ((soxr_cbufs_t)in)[i], ilen);
      done = soxr_output_1ch(p, (unsigned)i, ((soxr_bufs_t)out)[i], olen, true);
      if (!i)
        odone = done;   /* All channels yield the same count. */
    } else
#endif
    for (u = 0; u < p->num_channels; ++u) {
      if (in)
        soxr_input_1ch(p, u, ((soxr_cbufs_t)in)[u], ilen);
      odone = soxr_output_1ch(p, u, ((soxr_bufs_t)out)[u], olen, true);
    }
    idone = ilen;
  }
  else {
    idone = ilen? soxr_input (p, in , ilen) : 0;
    odone = soxr_output(p, out, olen);
  }
  if (idone0) *idone0 = idone;
  if (odone0) *odone0 = odone;
  return p->error;
}
/* Convenience one-shot resample: create a (low-quality-by-default) instance,
   process the whole input with an immediate flush (~ilen), and destroy it. */
soxr_error_t soxr_oneshot(
    double irate, double orate,
    unsigned num_channels,
    void const * in , size_t ilen, size_t * idone,
    void * out, size_t olen, size_t * odone,
    soxr_io_spec_t const * io_spec,
    soxr_quality_spec_t const * q_spec,
    soxr_runtime_spec_t const * runtime_spec)
{
  soxr_t resampler = NULL;
  soxr_error_t error = q_spec? q_spec->e : 0;

  if (!error) {
    soxr_quality_spec_t q_spec1;
    if (!q_spec)   /* Default to the light-weight quality for one-shots. */
      q_spec1 = soxr_quality_spec(SOXR_LQ, 0), q_spec = &q_spec1;
    resampler = soxr_create(irate, orate, num_channels,
        &error, io_spec, q_spec, runtime_spec);
  }
  if (!error) {
    /* ~ilen: process all input, then flush the remaining latency. */
    error = soxr_process(resampler, in, ~ilen, idone, out, olen, odone);
    soxr_delete(resampler);
  }
  return error;
}
/* Records an error on the instance; the first recorded error is sticky (a
   different, later error does not overwrite it — the stored one is returned
   instead).
   Fix: the guard previously read `!p->error', i.e. it returned early exactly
   when there was NO stored error, so a new error was never recorded; the
   test is now for an existing, different error. */
soxr_error_t soxr_set_error(soxr_t p, soxr_error_t error)
{
  if (!p) return "null pointer";
  if (p->error && p->error != error) return p->error;
  p->error = error;
  return 0;
}
|
GB_unop__ainv_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_fc64_fc64
// op(A') function: GB_unop_tran__ainv_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_FC64_ainv (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC64_ainv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_FC64_ainv (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) operator entry-wise over the anz values
// of Ax, writing into Cx.  The A->C cast is the identity for FC64, so the
// intermediate temporaries of the generated expansion are folded away.
GrB_Info GB_unop_apply__ainv_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = GB_FC64_ainv (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__ainv_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The actual work is done by the shared transpose template, expanded with
    // the GB_* macros defined above (phase 2 fills in C's pattern and values).
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
atomic-1.c | /* { dg-do run } */
/* { dg-options "-O2 -march=pentium" { target { { i?86-*-* x86_64-*-* } && ilp32 } } } */
#ifdef __i386__
#include "cpuid.h"
#endif
extern void abort (void);
double d;
struct
{
int i;
double e;
int j;
} x;
/* Exercise '#pragma omp atomic' update forms on a file-scope double:
   starting from d == 1.0, (1.0 + 7.5) * 2.5 / 0.25 == 85.0. */
void
f1 (void)
{
#pragma omp atomic
  d += 7.5;
#pragma omp atomic
  d *= 2.5;
#pragma omp atomic
  d /= 0.25;
}
/* Same atomic updates on a double embedded between two ints, to check that
   the atomic accesses do not clobber the neighbouring struct fields. */
void
f2 (void)
{
#pragma omp atomic
  x.e += 7.5;
#pragma omp atomic
  x.e *= 2.5;
#pragma omp atomic
  x.e /= 0.25;
}
int
main (void)
{
#ifdef __i386__
  /* The 64-bit atomics need cmpxchg8b; silently pass on CPUs without it. */
  unsigned int eax, ebx, ecx, edx;

  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 0;

  if (!(edx & bit_CMPXCHG8B))
    return 0;
#endif
  d = 1.0;
  f1 ();
  if (d != 85.0)
    abort ();
  x.e = 1.0;
  f2 ();
  /* x.i and x.j must stay zero: the atomics on x.e may not touch them. */
  if (x.i != 0 || x.e != 85.0 || x.j != 0)
    abort ();
  return 0;
}
|
shortcut_layer.c | #include "shortcut_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "blas.h"
#include "utils.h"
#include "gemm.h"
#include <stdio.h>
#include <assert.h>
/* Builds a shortcut (residual-add) layer that sums the outputs of `n' earlier
   layers (indices in input_layers) into this layer's w*h*c output, optionally
   with learned per-feature or per-channel mixing weights.  Note: the output
   spatial size equals the primary input's (w, h, c); the commented-out w2/h2
   parameters of an earlier API have been removed. */
layer make_shortcut_layer(int batch, int n, int *input_layers, int* input_sizes, int w, int h, int c,
    float **layers_output, float **layers_delta, float **layers_output_gpu, float **layers_delta_gpu, WEIGHTS_TYPE_T weights_type, WEIGHTS_NORMALIZATION_T weights_normalizion,
    ACTIVATION activation, int train)
{
    fprintf(stderr, "Shortcut Layer: ");
    int i;
    for(i = 0; i < n; ++i) fprintf(stderr, "%d, ", input_layers[i]);

    layer l = { (LAYER_TYPE)0 };
    l.train = train;
    l.type = SHORTCUT;
    l.batch = batch;
    l.activation = activation;
    l.n = n;                          /* Number of source layers to add. */
    l.input_layers = input_layers;    /* Indices of the source layers. */
    l.input_sizes = input_sizes;      /* Their output sizes (elements). */
    l.layers_output = layers_output;  /* Their output buffers. */
    l.layers_delta = layers_delta;    /* Their gradient buffers. */
    l.weights_type = weights_type;
    l.weights_normalizion = weights_normalizion;
    l.learning_rate_scale = 1; // not necessary

    //l.w = w2;
    //l.h = h2;
    //l.c = c2;
    l.w = l.out_w = w;
    l.h = l.out_h = h;
    l.c = l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;

    //if(w != w2 || h != h2 || c != c2) fprintf(stderr, " w = %d, w2 = %d, h = %d, h2 = %d, c = %d, c2 = %d \n", w, w2, h, h2, c, c2);

    l.index = l.input_layers[0];

    if (train) l.delta = (float*)xcalloc(l.outputs * batch, sizeof(float));
    l.output = (float*)xcalloc(l.outputs * batch, sizeof(float));

    /* Optional learned mixing weights: one per source (+1 for this layer's
       own input), possibly replicated per channel. */
    l.nweights = 0;
    if (l.weights_type == PER_FEATURE) l.nweights = (l.n + 1);
    else if (l.weights_type == PER_CHANNEL) l.nweights = (l.n + 1) * l.c;

    if (l.nweights > 0) {
        l.weights = (float*)calloc(l.nweights, sizeof(float));
        float scale = sqrt(2. / l.nweights);
        /* Initialise near 1 so the layer starts close to a plain sum. */
        for (i = 0; i < l.nweights; ++i) l.weights[i] = 1 + 0.01*rand_uniform(-1, 1);// scale*rand_uniform(-1, 1); // rand_normal();

        if (train) l.weight_updates = (float*)calloc(l.nweights, sizeof(float));
        l.update = update_shortcut_layer;
    }

    l.forward = forward_shortcut_layer;
    l.backward = backward_shortcut_layer;
#ifndef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input = (float*)calloc(l.batch*l.outputs, sizeof(float));
#endif // GPU

#ifdef GPU
    if (l.activation == SWISH || l.activation == MISH) l.activation_input_gpu = cuda_make_array(l.activation_input, l.batch*l.outputs);

    l.forward_gpu = forward_shortcut_layer_gpu;
    l.backward_gpu = backward_shortcut_layer_gpu;

    if (l.nweights > 0) {
        l.update_gpu = update_shortcut_layer_gpu;
        l.weights_gpu = cuda_make_array(l.weights, l.nweights);
        if (train) l.weight_updates_gpu = cuda_make_array(l.weight_updates, l.nweights);
    }

    if (train) l.delta_gpu = cuda_make_array(l.delta, l.outputs*batch);
    l.output_gpu = cuda_make_array(l.output, l.outputs*batch);
    l.input_sizes_gpu = cuda_make_int_array_new_api(input_sizes, l.n);
    l.layers_output_gpu = (float**)cuda_make_array_pointers((void**)layers_output_gpu, l.n);
    l.layers_delta_gpu = (float**)cuda_make_array_pointers((void**)layers_delta_gpu, l.n);
#endif // GPU

    l.bflops = l.out_w * l.out_h * l.out_c * l.n / 1000000000.;
    if (l.weights_type) l.bflops *= 2;
    fprintf(stderr, " wt = %d, wn = %d, outputs:%4d x%4d x%4d %5.3f BF\n", l.weights_type, l.weights_normalizion, l.out_w, l.out_h, l.out_c, l.bflops);
    return l;
}
/*
 * Resize the shortcut layer to a new spatial size (w, h) and refresh the
 * cached pointers/sizes of its source layers from the (already-resized)
 * network.  Channel count is unchanged.
 *
 * Fix vs. previous revision: use the checked allocators (xrealloc / xcalloc)
 * consistently -- the plain realloc/calloc calls here were the only unchecked
 * allocations in this file.
 */
void resize_shortcut_layer(layer *l, int w, int h, network *net)
{
    l->w = l->out_w = w;
    l->h = l->out_h = h;
    l->outputs = w*h*l->out_c;
    l->inputs = l->outputs;

    if (l->train) l->delta = (float*)xrealloc(l->delta, l->outputs * l->batch * sizeof(float));
    l->output = (float*)xrealloc(l->output, l->outputs * l->batch * sizeof(float));

    int i;
    for (i = 0; i < l->n; ++i) {
        int index = l->input_layers[i];
        l->input_sizes[i] = net->layers[index].outputs;
        l->layers_output[i] = net->layers[index].output;
        l->layers_delta[i] = net->layers[index].delta;
        // sources must match the new spatial size exactly
        assert(l->w == net->layers[index].out_w && l->h == net->layers[index].out_h);
    }

    if (l->activation == SWISH || l->activation == MISH) l->activation_input = (float*)xrealloc(l->activation_input, l->batch*l->outputs * sizeof(float));

#ifdef GPU
    cuda_free(l->output_gpu);
    l->output_gpu = cuda_make_array(l->output, l->outputs*l->batch);

    if (l->train) {
        cuda_free(l->delta_gpu);
        l->delta_gpu = cuda_make_array(l->delta, l->outputs*l->batch);
    }

    // rebuild the device-side tables of per-source pointers
    float **layers_output_gpu = (float **)xcalloc(l->n, sizeof(float *));
    float **layers_delta_gpu = (float **)xcalloc(l->n, sizeof(float *));
    for (i = 0; i < l->n; ++i) {
        const int index = l->input_layers[i];
        layers_output_gpu[i] = net->layers[index].output_gpu;
        layers_delta_gpu[i] = net->layers[index].delta_gpu;
    }

    memcpy_ongpu(l->input_sizes_gpu, l->input_sizes, l->n * sizeof(int));
    memcpy_ongpu(l->layers_output_gpu, layers_output_gpu, l->n * sizeof(float*));
    memcpy_ongpu(l->layers_delta_gpu, layers_delta_gpu, l->n * sizeof(float*));
    free(layers_output_gpu);
    free(layers_delta_gpu);

    if (l->activation == SWISH || l->activation == MISH) {
        cuda_free(l->activation_input_gpu);
        l->activation_input_gpu = cuda_make_array(l->activation_input, l->batch*l->outputs);
    }
#endif
}
/*
 * Forward pass: output = activation(input + weighted sum of source layers).
 * A fast elementwise path is taken for the common unweighted, single-source,
 * same-shape case; everything else goes through shortcut_multilayer_cpu().
 */
void forward_shortcut_layer(const layer l, network_state state)
{
    const layer src = state.net.layers[l.index];
    const int same_shape = (src.w == l.w && src.h == l.h && src.c == l.c);

    if (l.nweights == 0 && l.n == 1 && same_shape) {
        // fast path: plain residual sum, parallelized over all elements
        const int total = l.batch * l.w * l.h * l.c;
        int i;
        #pragma omp parallel for
        for (i = 0; i < total; ++i) {
            l.output[i] = state.input[i] + src.output[i];
        }
    }
    else {
        shortcut_multilayer_cpu(l.outputs * l.batch, l.outputs, l.batch, l.n, l.input_sizes, l.layers_output, l.output, state.input, l.weights, l.nweights, l.weights_normalizion);
    }

    // activation (SWISH/MISH cache their pre-activation input for backward)
    if (l.activation == SWISH) activate_array_swish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else if (l.activation == MISH) activate_array_mish(l.output, l.outputs*l.batch, l.activation_input, l.output);
    else activate_array_cpu_custom(l.output, l.outputs*l.batch, l.activation);
}
/*
 * Backward pass: fold the activation gradient into l.delta, then scatter the
 * gradient to the network input (state.delta), every source layer's delta,
 * and the shortcut weight updates.
 */
void backward_shortcut_layer(const layer l, network_state state)
{
    const int total = l.outputs * l.batch;

    // activation gradient first, applied in place on l.delta
    if (l.activation == SWISH) gradient_array_swish(l.output, total, l.activation_input, l.delta);
    else if (l.activation == MISH) gradient_array_mish(total, l.activation_input, l.delta);
    else gradient_array(l.output, total, l.activation, l.delta);

    backward_shortcut_multilayer_cpu(total, l.outputs, l.batch, l.n, l.input_sizes,
        l.layers_delta, state.delta, l.delta, l.weights, l.weight_updates, l.nweights, state.input, l.layers_output, l.weights_normalizion);
}
/*
 * SGD update for the (optional) shortcut weights: weight decay, gradient
 * step scaled by batch size, then momentum damping of the accumulator.
 * No-op when the layer is unweighted.
 */
void update_shortcut_layer(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights <= 0) return;

    const float lr = learning_rate_init * l.learning_rate_scale;
    axpy_cpu(l.nweights, -decay*batch, l.weights, 1, l.weight_updates, 1);
    axpy_cpu(l.nweights, lr / batch, l.weight_updates, 1, l.weights, 1);
    scal_cpu(l.nweights, momentum, l.weight_updates, 1);
}
#ifdef GPU
/*
 * GPU forward pass: always routed through the generic multilayer kernel
 * (the single-source fast path below was disabled and kept for reference).
 */
void forward_shortcut_layer_gpu(const layer l, network_state state)
{
    //copy_ongpu(l.outputs*l.batch, state.input, 1, l.output_gpu, 1);
    //simple_copy_ongpu(l.outputs*l.batch, state.input, l.output_gpu);
    //shortcut_gpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //input_shortcut_gpu(state.input, l.batch, l.w, l.h, l.c, state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //-----------
    //if (l.outputs == l.input_sizes[0])
    //if(l.n == 1 && l.nweights == 0)
    //{
    //    input_shortcut_gpu(state.input, l.batch, state.net.layers[l.index].w, state.net.layers[l.index].h, state.net.layers[l.index].c,
    //        state.net.layers[l.index].output_gpu, l.out_w, l.out_h, l.out_c, l.output_gpu);
    //}
    //else
    {
        shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_output_gpu, l.output_gpu, state.input, l.weights_gpu, l.nweights, l.weights_normalizion);
    }

    // activation on device (SWISH/MISH cache pre-activation for backward)
    if (l.activation == SWISH) activate_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else if (l.activation == MISH) activate_array_mish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.output_gpu);
    else activate_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation);
}
/*
 * GPU backward pass: activation gradient into l.delta_gpu first, then the
 * multilayer kernel distributes gradients to state.delta, all source layer
 * deltas, and the weight updates.  Order matters: the gradient must be
 * applied before the scatter.
 */
void backward_shortcut_layer_gpu(const layer l, network_state state)
{
    if (l.activation == SWISH) gradient_array_swish_ongpu(l.output_gpu, l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else if (l.activation == MISH) gradient_array_mish_ongpu(l.outputs*l.batch, l.activation_input_gpu, l.delta_gpu);
    else gradient_array_ongpu(l.output_gpu, l.outputs*l.batch, l.activation, l.delta_gpu);

    backward_shortcut_multilayer_gpu(l.outputs, l.batch, l.n, l.input_sizes_gpu, l.layers_delta_gpu, state.delta, l.delta_gpu,
        l.weights_gpu, l.weight_updates_gpu, l.nweights, state.input, l.layers_output_gpu, l.weights_normalizion);
    //axpy_ongpu(l.outputs*l.batch, 1, l.delta_gpu, 1, state.delta, 1);
    //shortcut_gpu(l.batch, l.out_w, l.out_h, l.out_c, l.delta_gpu, l.w, l.h, l.c, state.net.layers[l.index].delta_gpu);
}
/*
 * GPU SGD update for the shortcut weights.  NaN/Inf values are sanitized
 * before the update to keep training from diverging; the sanitize calls must
 * precede the axpy/scal sequence.
 */
void update_shortcut_layer_gpu(layer l, int batch, float learning_rate_init, float momentum, float decay)
{
    if (l.nweights > 0) {
        float learning_rate = learning_rate_init*l.learning_rate_scale;

        // scrub bad values first, then: decay, gradient step, momentum
        reset_nan_and_inf(l.weight_updates_gpu, l.nweights);
        fix_nan_and_inf(l.weights_gpu, l.nweights);

        axpy_ongpu(l.nweights, -decay*batch, l.weights_gpu, 1, l.weight_updates_gpu, 1);
        axpy_ongpu(l.nweights, learning_rate / batch, l.weight_updates_gpu, 1, l.weights_gpu, 1);
        scal_ongpu(l.nweights, momentum, l.weight_updates_gpu, 1);

        //if (l.clip) {
        //    constrain_gpu(l.nweights, l.clip, l.weights_gpu, 1);
        //}
    }
}
/*
 * Copy weights device -> host.  The async pull requires an explicit stream
 * synchronize before the host buffer may be read.
 */
void pull_shortcut_layer(layer l)
{
    cuda_pull_array_async(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
    CHECK_CUDA(cudaStreamSynchronize(get_cuda_stream()));
}
/* Copy weights host -> device (synchronous push). */
void push_shortcut_layer(layer l)
{
    cuda_push_array(l.weights_gpu, l.weights, l.nweights);
    CHECK_CUDA(cudaPeekAtLastError());
}
#endif
|
mysql_netauth_fmt_plug.c | /* Cracker for MySQL network authentication hashes. Hacked together
* during May of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mysqlna;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mysqlna);
#else
#include "sha.h"
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#define OMP_SCALE 64
#endif
#include "memdbg.h"
#define FORMAT_LABEL "mysqlna"
#define FORMAT_NAME "MySQL Network Authentication"
#define ALGORITHM_NAME "SHA1 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define HEX_LENGTH 40
#define CIPHERTEXT_LENGTH 90
#define BINARY_SIZE 20
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_NONE
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests mysqlna_tests[] = {
{"$mysqlna$2D52396369653E4626293B2F75244D3871507A39*7D63098BEE381A51AA6DF11E307E46BD4F8B6E0C", "openwall"},
{"$mysqlna$615c2b5e79656f7d4931594e5b5d416c7b483365*c3a70da2874db890eb2f0a5e3ea80b2ed17da0d0", "openwall"},
{"$mysqlna$295a687c59275452214b366b39776d3f31757b2e*7343f45c94cccd646a1b29bbfad064a9ee5c0380", "overlord magnum"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
char unsigned scramble[20];
} *cur_salt;
/*
 * One-time format initialization: scale keys-per-crypt by the OpenMP thread
 * count (times OMP_SCALE so each thread gets a work batch), then allocate the
 * global key and result buffers sized for the scaled maximum.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
    omp_t = omp_get_max_threads();
    self->params.min_keys_per_crypt *= omp_t;
    omp_t *= OMP_SCALE;
    self->params.max_keys_per_crypt *= omp_t;
#endif
    saved_key = mem_calloc_tiny(sizeof(*saved_key) *
            self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
    crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/*
 * Validate a candidate hash of the form
 *   $mysqlna$<40 hex scramble>*<40 hex sha1>
 * Returns 1 when the tag, both hex fields, and the separator are all present
 * and correctly sized; 0 otherwise.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
    char *scramble, *star, *hash;

    if (strncmp(ciphertext, "$mysqlna$", 9))
        return 0;
    scramble = ciphertext + 9;
    star = strstr(ciphertext, "*");
    if (!star)
        return 0;

    /* scramble field must be exactly HEX_LENGTH hex digits */
    if (star - scramble != HEX_LENGTH)
        return 0;
    while (scramble < star && atoi16[ARCH_INDEX(*scramble)] != 0x7F)
        scramble++;
    if (scramble != star)
        return 0;

    /* something must follow the '*' */
    if (strlen(scramble) < HEX_LENGTH)
        return 0;

    /* trailing field: exactly HEX_LENGTH hex digits up to the terminator */
    hash = scramble + 1;
    while (atoi16[ARCH_INDEX(*hash)] != 0x7F)
        hash++;
    return *hash == 0 && hash - scramble - 1 == HEX_LENGTH;
}
/*
 * Canonicalize a ciphertext to lowercase in a static buffer (the format sets
 * FMT_SPLIT_UNIFIES_CASE, so upper/lower hex compare equal).
 *
 * Fix: strncpy() does not NUL-terminate when the source is at least as long
 * as the buffer, which would make strlwr() run off the end for oversized
 * input; terminate explicitly.
 */
static char* split(char *ciphertext, int index, struct fmt_main *self)
{
    static char out[CIPHERTEXT_LENGTH + 1];
    strncpy(out, ciphertext, sizeof(out) - 1);
    out[sizeof(out) - 1] = 0;
    strlwr(out);
    return out;
}
/*
 * Decode the 40-hex-digit scramble (the server challenge) into the static
 * salt structure.  valid() has already guaranteed exactly 20 hex byte pairs
 * before the '*', so the input can be read in place -- no copy needed.
 * Returns a pointer to a static buffer, as the JtR salt API expects.
 */
static void *get_salt(char *ciphertext)
{
    static struct custom_salt cs;
    const char *p = ciphertext + 9; /* skip over "$mysqlna$" */
    int i;

    for (i = 0; i < 20; i++)
        cs.scramble[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) |
                          atoi16[ARCH_INDEX(p[2 * i + 1])];
    return (void *)&cs;
}
/*
 * Decode the hex digest after the last '*' into a static, word-aligned
 * BINARY_SIZE byte buffer (the comparison target for cmp_*).
 */
static void *get_binary(char *ciphertext)
{
    static union {
        unsigned char c[BINARY_SIZE];
        ARCH_WORD dummy; /* forces word alignment */
    } buf;
    char *p = strrchr(ciphertext, '*') + 1;
    int i;

    for (i = 0; i < BINARY_SIZE; i++, p += 2)
        buf.c[i] = (atoi16[ARCH_INDEX(p[0])] << 4) |
                    atoi16[ARCH_INDEX(p[1])];
    return buf.c;
}
/* Hash-table bucketing helpers: return the low 4..27 bits of the computed
 * digest, one function per JtR hash-table size level. */
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
/* Select the salt (server scramble) used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
    cur_salt = (struct custom_salt *)salt;
}
/*
 * Compute the MySQL network-auth reply for each queued candidate password:
 *
 *   stage1 = SHA1(password)
 *   inner  = SHA1(stage1)                  -- double-SHA1 of the password
 *   token  = SHA1(scramble || inner)
 *   reply  = token XOR stage1              -- what the client sends
 *
 * The reply is stored in crypt_out[index] for the cmp_* functions.
 * Parallelized over candidates with OpenMP when available.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
    int count = *pcount;
    int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
    for (index = 0; index < count; index++)
#endif
    {
        unsigned char stage1_hash[20];
        unsigned char inner_hash[20];
        unsigned char token[20];
        SHA_CTX ctx;
        int i;
        unsigned char *p = (unsigned char*)crypt_out[index];

        // stage1 = SHA1(password)
        SHA1_Init(&ctx);
        SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
        SHA1_Final(stage1_hash, &ctx);

        // inner = SHA1(stage1)
        SHA1_Init(&ctx);
        SHA1_Update(&ctx, stage1_hash, 20);
        SHA1_Final(inner_hash, &ctx);

        // token = SHA1(scramble || inner)
        SHA1_Init(&ctx);
        SHA1_Update(&ctx, cur_salt->scramble, 20);
        SHA1_Update(&ctx, inner_hash, 20);
        SHA1_Final(token, &ctx);

        // reply = token XOR stage1
        for(i = 0; i < 20; i++) {
            p[i] = token[i] ^ stage1_hash[i];
        }
    }
    return count;
}
/*
 * Return 1 if any computed reply matches the target binary.  Without OpenMP
 * only index 0 is examined -- safe here because MIN/MAX_KEYS_PER_CRYPT are 1
 * in non-OpenMP builds (they are only scaled up in init() under _OPENMP).
 */
static int cmp_all(void *binary, int count)
{
    int index = 0;
#ifdef _OPENMP
    for (; index < count; index++)
#endif
    if (!memcmp(binary, crypt_out[index], BINARY_SIZE))
        return 1;
    return 0;
}
/* Exact 20-byte comparison of one candidate against the target binary. */
static int cmp_one(void *binary, int index)
{
    return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* cmp_one already compares the full digest, so nothing further to verify. */
static int cmp_exact(char *source, int index)
{
    return 1;
}
/*
 * Store a candidate password for slot `index`, silently truncated to
 * PLAINTEXT_LENGTH bytes and NUL-terminated.
 */
static void mysqlna_set_key(char *key, int index)
{
    const int klen = strlen(key);
    const int n = (klen > PLAINTEXT_LENGTH) ? PLAINTEXT_LENGTH : klen;

    memcpy(saved_key[index], key, n);
    saved_key[index][n] = 0;
}
/* Return the stored (possibly truncated) candidate for slot `index`. */
static char *get_key(int index)
{
    return saved_key[index];
}
/* Format descriptor registered with the John the Ripper core: static
 * parameters first, then the method table wiring up the functions above. */
struct fmt_main fmt_mysqlna = {
    {
        FORMAT_LABEL,
        FORMAT_NAME,
        ALGORITHM_NAME,
        BENCHMARK_COMMENT,
        BENCHMARK_LENGTH,
        PLAINTEXT_LENGTH,
        BINARY_SIZE,
        BINARY_ALIGN,
        SALT_SIZE,
        SALT_ALIGN,
        MIN_KEYS_PER_CRYPT,
        MAX_KEYS_PER_CRYPT,
        /* case-sensitive 8-bit keys; OpenMP-capable; split() lowercases */
        FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
        { NULL },  /* no tunable cost parameters */
#endif
        mysqlna_tests
    }, {
        init,
        fmt_default_done,
        fmt_default_reset,
        fmt_default_prepare,
        valid,
        split,
        get_binary,
        get_salt,
#if FMT_MAIN_VERSION > 11
        { NULL },
#endif
        fmt_default_source,
        {
            fmt_default_binary_hash_0,
            fmt_default_binary_hash_1,
            fmt_default_binary_hash_2,
            fmt_default_binary_hash_3,
            fmt_default_binary_hash_4,
            fmt_default_binary_hash_5,
            fmt_default_binary_hash_6
        },
        fmt_default_salt_hash,
        set_salt,
        mysqlna_set_key,
        get_key,
        fmt_default_clear_keys,
        crypt_all,
        {
            get_hash_0,
            get_hash_1,
            get_hash_2,
            get_hash_3,
            get_hash_4,
            get_hash_5,
            get_hash_6
        },
        cmp_all,
        cmp_one,
        cmp_exact
    }
};
|
transform.h | /*
* transform.h
*
* Created on: Dec 28, 2015
* @author: agibsonccc
* @author: raver119@gmail.com
*/
#ifndef TRANSFORM_H_
#define TRANSFORM_H_
#include <vector>
#include <templatemath.h>
#include <ops/ops.h>
#include <ops/special_ops.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include <pairwise_util.h>
#include <dll.h>
#include <loops/reduce.h>
#include <loops/scalar.h>
#include <loops/indexreduce.h>
#include <loops/broadcasting.h>
#ifdef __CUDACC__
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#endif
#ifndef _OPENMP
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include "legacy_ops.h"
namespace functions {
namespace transform {
/**
 * Element-wise transform: result[i] = OpType::op(x[i], extraParams).
 * Hosts both the CUDA device-side implementations (under __CUDACC__) and the
 * host (CPU/OpenMP) implementations, each in strided, shape-driven, and
 * index-gather variants, plus opNum-based dispatch wrappers.
 */
template<typename T>
class Transform {
public:
#ifdef __CUDACC__

    /**
     * Device-side gather/scatter transform:
     * result[indexes[i]] = op(dy[indexes[i]], params), iterated with a
     * grid-stride loop over the length encoded in shapeInfo.
     */
    virtual __inline__ __device__ void transform(
            T *dy,
            int *shapeInfo,
            T *params,
            T *result,
            int *indexes) {
        Nd4jIndex n = shape::length(shapeInfo);
        int totalThreads = gridDim.x * blockDim.x;
        Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x;

        // grid-stride loop: each thread handles every totalThreads-th element
#pragma unroll
        for (; i < n; i+= totalThreads) {
            result[indexes[i]] = op(dy[indexes[i]], params);
        }
    }

    /**
     * Device-side shape-driven transform.  Ops flagged requiresSpecial run
     * their own kernel; otherwise the fast strided path is used when both
     * buffers have a usable elementWiseStride and matching order, falling
     * back to explicit coordinate/offset arithmetic per element.
     */
    template<typename OpType>
    static __inline__ __device__ void transformCuda(
            T *dy,
            int *shapeInfo,
            T *params,
            T *result,
            int *resultShapeInfo,
            int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager, int *tadShapeInfo, Nd4jIndex *tadOffsets) {

        if(OpType::requiresSpecial) {
            OpType::execSpecialCuda(dy,shapeInfo,result,resultShapeInfo,params, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets);
            return;
        } else {
            int *xShape = shape::shapeOf(shapeInfo);
            int *xStride = shape::stride(shapeInfo);
            char xOrder = shape::order(shapeInfo);
            char resultOrder = shape::order(resultShapeInfo);
            int xRank = shape::rank(shapeInfo);
            int xOffset = shape::offset(shapeInfo);
            int xElementWiseStride = shape::elementWiseStride(shapeInfo);
            int resultElementWiseStride = shape::elementWiseStride(resultShapeInfo);
            int tid = blockIdx.x * blockDim.x + threadIdx.x;

            // length computed once per block, shared across threads
            __shared__ int length;
            if(threadIdx.x == 0)
                length = shape::length(shapeInfo);
            __syncthreads();

            if(xElementWiseStride >= 1 && resultElementWiseStride >= 1 && xOrder == resultOrder) {
                // fast path: delegate to the flat strided kernel
                transformCuda<OpType>(
                        length,
                        dy,
                        xElementWiseStride,
                        params,
                        result,
                        resultElementWiseStride, allocationPointer, reductionPointer, manager);
            }
            else {
                // slow path: per-element index -> coordinates -> offsets
                //long allocSize = sizeof(int) * xRank;
                //int *xIdx = shape::cuMalloc(manager->getT1ShapeBuffer(), allocSize);
                int xCoord[MAX_RANK];

#pragma unroll
                for (Nd4jIndex i = tid; i < length; i+= gridDim.x * blockDim.x) {
                    //int *xIdx = shape::ind2sub(xRank, xShape, i, xIdx);
                    shape::ind2sub(xRank,shape::shapeOf(shapeInfo),i, xCoord);
                    Nd4jIndex xOffset2 = shape::getOffset(xOffset, xShape, xStride, xCoord, xRank);
                    Nd4jIndex resultOffset2 = shape::getOffset(0,xShape,shape::stride(resultShapeInfo),xCoord,xRank);
                    result[resultOffset2] = OpType::op(dy[xOffset2], params);
                }
            }
        }
    }

    /**
     * Device-side flat strided transform over n elements, with a dedicated
     * branch for the unit-stride (contiguous) case.  Grid-stride loop.
     */
    template<typename OpType>
    static  __inline__ __device__ void transformCuda(
            Nd4jIndex n,
            T *dy,
            int incy,
            T *params,
            T *result,
            int resultStride,
            int *allocationPointer, T *reductionPointer, UnifiedSharedMemory *manager) {
        int totalThreads = gridDim.x * blockDim.x;
        Nd4jIndex i = blockIdx.x * blockDim.x + threadIdx.x;

        if(incy == 1 && resultStride == 1) {
            // contiguous buffers
#pragma unroll
            for (; i < n; i += totalThreads) {
                result[i] = OpType::op(dy[i], params);
            }
        }
        else {
            // arbitrary (positive) strides
#pragma unroll
            for (; i < n; i += totalThreads) {
                result[i * resultStride] = OpType::op(dy[i * incy], params);
            }
        }
    }

    /** opNum dispatcher for the shape-driven device transform. */
    static  __inline__ __device__ void transformCuda(
            const int opNum,
            T *dy,
            int *shapeInfo,
            T *params,
            T *result,
            int *resultShapeInfo,
            int *allocationPointer,
            T *reductionPointer,
            UnifiedSharedMemory *manager, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
        DISPATCH_BY_OPNUM(transformCuda, PARAMS(dy, shapeInfo, params, result, resultShapeInfo, allocationPointer, reductionPointer, manager, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
    }

    /** opNum dispatcher for the flat strided device transform. */
    static  __inline__ __device__ void transformCuda(
            const int opNum,
            Nd4jIndex n,
            T *dy,
            int incy,
            T *params,
            T *result,
            int resultStride,
            int *allocationPointer,
            T *reductionPointer,
            UnifiedSharedMemory *manager) {
        DISPATCH_BY_OPNUM(transformCuda, PARAMS(n, dy, incy, params, result, resultStride, allocationPointer, reductionPointer, manager), TRANSFORM_OPS);
    }

#endif

    /** opNum dispatcher for the host flat strided transform. */
    static void exec(int opNum, T *dx, int xStride, T *result, int resultStride, T *extraParams, const int n) {
        DISPATCH_BY_OPNUM(exec, PARAMS(dx, xStride, result, resultStride, extraParams, n), TRANSFORM_OPS);
    }

    /** opNum dispatcher for the host gather/scatter transform. */
    static void exec(
            int opNum,
            T *dx,
            int *xShapeInfo,
            T *result,
            int *resultShapeInfo,
            T *extraParams,
            int *indexes,
            int *resultIndexes, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
        DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeInfo, result, resultShapeInfo, extraParams, indexes, resultIndexes, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
    }

    /** opNum dispatcher for the host shape-driven transform. */
    static void exec(
            int opNum,
            T *dx,
            int *xShapeInfo,
            T *result,
            int *resultShapeInfo,
            T *extraParams, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
        DISPATCH_BY_OPNUM(exec, PARAMS(dx, xShapeInfo, result, resultShapeInfo, extraParams, tadShapeInfo, tadOffsets), TRANSFORM_OPS);
    }

    /**
     * Host shape-driven transform.  Special ops run their own routine;
     * otherwise the flat strided path is used when strides/order allow,
     * falling back to the raw two-array iterator over all coordinates.
     */
    template<typename OpType>
    static void _CUDA_H exec(
            T *dx,
            int *xShapeInfo,
            T *result,
            int *resultShapeInfo,
            T *extraParams, int *tadShapeInfo, Nd4jIndex *tadOffsets) {

        if(OpType::requiresSpecial) {
            OpType::execSpecial(dx,xShapeInfo,result,resultShapeInfo,extraParams, tadShapeInfo, tadOffsets);
            return;
        }

        int n = shape::length(xShapeInfo);
        int xElementWiseStride = shape::elementWiseStride(xShapeInfo);
        int resultElementWiseStride = shape::elementWiseStride(resultShapeInfo);

        if(xElementWiseStride >= 1 && resultElementWiseStride >= 1 && shape::order(xShapeInfo) == shape::order(resultShapeInfo)) {
            exec<OpType>(dx,xElementWiseStride,result,resultElementWiseStride,extraParams,n);
        }
        else {
            // generic path: walk both arrays coordinate-by-coordinate
            int shapeIter[MAX_RANK];
            int coord[MAX_RANK];
            int dim;
            int xStridesIter[MAX_RANK];
            int resultStridesIter[MAX_RANK];
            int *xShape = shape::shapeOf(xShapeInfo);
            int *xStride = shape::stride(xShapeInfo);
            int *resultStride = shape::stride(resultShapeInfo);
            int rank = shape::rank(xShapeInfo);
            if(PrepareTwoRawArrayIter<T>(rank,
                                         xShape,
                                         dx,
                                         xStride,
                                         result,
                                         resultStride,
                                         &rank,
                                         shapeIter,
                                         &dx,
                                         xStridesIter,
                                         &result,
                                         resultStridesIter) >= 0) {
                ND4J_RAW_ITER_START(dim, rank, coord, shapeIter);
                {
                    // Process the innermost dimension
                    T *xIter = dx;
                    T *resultIter = result;
                    resultIter[0] = OpType::op(xIter[0], extraParams);
                }
                ND4J_RAW_ITER_TWO_NEXT(dim,
                                       rank,
                                       coord,
                                       shapeIter,
                                       dx,
                                       xStridesIter,
                                       result,
                                       resultStridesIter);
            }
        }
    }

    /**
     * Host gather/scatter transform:
     * result[resultIndexes[i]] = op(dx[indexes[i]]), OpenMP-parallel.
     */
    template<typename OpType>
    static void exec(
            T *dx,
            int *xShapeInfo,
            T *result,
            int *resultShapeInfo,
            T *extraParams,
            int *indexes,
            int *resultIndexes, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
        int n = shape::length(xShapeInfo);
#pragma omp parallel for simd schedule(guided) proc_bind(AFFINITY) default(shared)
        for (Nd4jIndex i = 0; i < n; i++) {
            result[resultIndexes[i]] = OpType::op(dx[indexes[i]], extraParams);
        }
    }

    /**
     * Host flat strided transform.  Thread count is scaled to the problem
     * size (n / ELEMENT_THRESHOLD, clamped to [1, max threads]); each thread
     * processes a contiguous span of ~n/num_threads (+8 slack) elements.
     */
    template<typename OpType>
    static void exec(T *dx,
                     int xStride,
                     T *result,
                     int resultStride,
                     T *extraParams,
                     const int n) {
        int elementsPerThread = n / ELEMENT_THRESHOLD;
        int num_threads = nd4j::math::nd4j_max<int>(1, elementsPerThread);
        num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());

        int span = (n / num_threads) + 8;

        if (xStride == 1 && resultStride == 1) {
            // contiguous buffers
#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
            {
                int tid = omp_get_thread_num();
                int start = span * tid;
                int end = span * (tid + 1);
                if (end > n) end = n;

#pragma omp simd
                for (Nd4jIndex i = start; i < end; i++) {
                    result[i] = OpType::op(dx[i], extraParams);
                }
            }
        } else {
            // strided buffers
#pragma omp parallel num_threads(num_threads) if (num_threads>1) proc_bind(AFFINITY) default(shared)
            {
                int tid = omp_get_thread_num();
                int start = span * tid;
                int end = span * (tid + 1);
                if (end > n) end = n;

#pragma omp simd
                for (Nd4jIndex i = start; i < end; i++) {
                    result[i*resultStride] = OpType::op(dx[i * xStride], extraParams);
                }
            }
        }
    }
};
}
}
#ifdef __CUDACC__
/**
* The c and driver interface
* for th kernels
* @param opNum the op number
* @param n the length of the problem
* @param idx
* the start index
* @param dy the vector to transform
* @param incy the stride for the vector
* @param params the extra parameters for the problem
* @param result the result storage
* @param blockernelHeight the block size for the problem
*/
/**
 * Device entry point for the opNum-dispatched strided transform: sets up
 * UnifiedSharedMemory in dynamic shared memory (thread 0 only, then a
 * barrier) and forwards to Transform<T>::transformCuda.
 */
template <typename T>
__device__ void transformGeneric(
        int opNum,
        Nd4jIndex n,
        T *dy,
        int incy,
        T *params,
        T *result,
        int resultStride, int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if(threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), 0);
    }
    __syncthreads();

    functions::transform::Transform<T>::transformCuda(
            opNum,
            n,
            dy,
            incy,
            params,
            result,
            resultStride,
            allocationPointer,
            reductionPointer,
            manager);
}
/**
 * Device entry point for the compile-time-op strided transform (OpClass
 * resolved statically instead of via opNum dispatch).  Same shared-memory
 * setup as transformGeneric above.
 */
template <typename T, typename OpClass>
__device__ void transformSimpleGeneric(
        Nd4jIndex n,
        T *dy,
        int incy,
        T *params,
        T *result,
        int resultStride, int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if(threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), 0);
    }
    __syncthreads();

    functions::transform::Transform<T>::template transformCuda<OpClass>(
            n,
            dy,
            incy,
            params,
            result,
            resultStride,
            allocationPointer,
            reductionPointer,
            manager);
}
/**
* The c and driver interface
* for th kernels
* @param opNum the op number
* @param n the length of the problem
* @param idx
* the start index
* @param dy the vector to transform
* @param incy the stride for the vector
* @param params the extra parameters for the problem
* @param result the result storage
* @param blockernelHeight the block size for the problem
*/
/*
template <typename T>
__device__ void transformGeneric(
int opNum,
T *dy,
int *xShapeInfo, int xRank,
T *params,
T *result,int *resultShapeInfo, int zRank, int *allocationPointer, T *reductionPointer) {
__shared__ UnifiedSharedMemory *manager;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
manager = new(shmem) UnifiedSharedMemory((int *) shmem);
manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
}
__syncthreads();
functions::transform::Transform<T>::transformCuda(
opNum,
dy,
xShapeInfo,
params,
result,
resultShapeInfo,
allocationPointer,
reductionPointer,
manager);
}
*/
/**
 * Device entry point for the compile-time-op, shape-driven transform:
 * shared-memory manager setup (sized for rank xRank) then forward to the
 * shapeInfo overload of transformCuda.
 */
template <typename T, typename OpClass>
__device__ void transformSimpleGeneric(
        T *dy,
        int *xShapeInfo, int xRank,
        T *params,
        T *result,int *resultShapeInfo, int zRank, int *allocationPointer, T *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
    }
    __syncthreads();

    functions::transform::Transform<T>::template transformCuda<OpClass>(
            dy,
            xShapeInfo,
            params,
            result,
            resultShapeInfo,
            allocationPointer,
            reductionPointer,
            manager, tadShapeInfo, tadOffsets);
}
/**
* The c and driver interface
* for th kernels
* @param opNum the op number
* @param n the length of the problem
* @param idx
* the start index
* @param dy the vector to transform
* @param incy the stride for the vector
* @param params the extra parameters for the problem
* @param result the result storage
* @param blockernelHeight the block size for the problem
*/
/**
 * Device entry point for the indexed transform.  NOTE: the actual call into
 * Transform<T> is commented out, so this currently only performs the
 * shared-memory setup and is effectively a no-op -- the transformDoubleIndexes/
 * FloatIndexes/HalfIndexes kernels below therefore do nothing.
 */
template <typename T>
__device__ void transformGenericIndexes(
        int opNum,
        T *dy,
        int *xShapeInfo, int xRank,
        T *params,
        T *result,int *indexes, int *allocationPointer, T *reductionPointer) {

    __shared__ UnifiedSharedMemory *manager;

    if (threadIdx.x == 0) {
        extern __shared__ unsigned char shmem[];
        manager = new(shmem) UnifiedSharedMemory((int *) shmem);
        manager->init(sizeof(UnifiedSharedMemory), 0, sizeof(functions::transform::Transform<T>), sizeof(shape::TAD), xRank);
    }
    __syncthreads();

/*
    functions::transform::Transform<T>::transformCuda(
            opNum,
            dy,
            xShapeInfo,
            params,
            result,
            indexes,
            allocationPointer,
            reductionPointer,
            manager);
*/
}
/**
* The c and driver interface
* for th kernels
* @param opNum the op number
* @param n the length of the problem
* @param idx
* the start index
* @param dy the vector to transform
* @param incy the stride for the vector
* @param params the extra parameters for the problem
* @param result the result storage
* @param blockernelHeight the block size for the problem
*/
/* double-precision kernel wrapper for the indexed transform.
 * NOTE: transformGenericIndexes' body is commented out, so this kernel is
 * currently a no-op. */
extern "C" __global__ void transformDoubleIndexes(
        int opNum,
        double *dy,
        int *shapeInfo, int xRank,
        double *params,
        double *result,int *indexes, int *allocationPointer, double *reductionPointer) {

    transformGenericIndexes<double>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes, allocationPointer, reductionPointer);
}
/**
* The c and driver interface
* for th kernels
* @param opNum the op number
* @param n the length of the problem
* @param idx
* the start index
* @param dy the vector to transform
* @param incy the stride for the vector
* @param params the extra parameters for the problem
* @param result the result storage
* @param blockernelHeight the block size for the problem
*/
/* single-precision kernel wrapper for the indexed transform (currently a
 * no-op; see transformGenericIndexes). */
extern "C" __global__ void transformFloatIndexes(
        int opNum,
        float *dy,
        int *shapeInfo, int xRank,
        float *params,
        float *result,int *indexes, int *allocationPointer, float *reductionPointer) {

    transformGenericIndexes<float>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes, allocationPointer, reductionPointer);
}
/* half-precision kernel wrapper for the indexed transform (currently a
 * no-op; see transformGenericIndexes). */
extern "C" __global__ void transformHalfIndexes(
        int opNum,
        float16 *dy,
        int *shapeInfo, int xRank,
        float16 *params,
        float16 *result,int *indexes, int *allocationPointer, float16 *reductionPointer) {

    transformGenericIndexes<float16>(
            opNum,
            dy,
            shapeInfo, xRank,
            params,
            result,indexes, allocationPointer, reductionPointer);
}
/**
* This is utility kernel, that updates given special buffer with proper values in device memory
*/
/**
 * Utility kernel that fills device buffers with constant descriptor values.
 * Only the very first thread writes; all others exit immediately.
 *
 * NOTE(review): the 8 ints written to specialPointer look like a rank-2
 * shapeInfo descriptor for a {rows, 1} array (shape, strides {1,1}, offset 0,
 * elementWiseStride 1, order 99 == 'c') -- confirm against the shapeInfo
 * layout before relying on this reading.
 */
extern "C" __global__ void prepareShapeBuffer(int *dimension, int *maxDimension, int *specialPointer, int rows) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0)
        return;

    dimension[0] = 0;
    maxDimension[0] = 1;

    specialPointer[0] = 2;
    specialPointer[1] = rows;
    specialPointer[2] = 1;
    specialPointer[3] = 1;
    specialPointer[4] = 1;
    specialPointer[5] = 0;
    specialPointer[6] = 1;
    specialPointer[7] = 99;
}
/**
 * Debug-only kernel: prints the target dimension (extraParams[1]) and that
 * dimension's extent from xShapeInfoBuffer.  It writes nothing to zShapeInfo
 * despite the name -- only printf side effects.  Single-thread guarded.
 */
extern "C" __global__ void prepareDimensionalShapeBuffer(int *xShapeInfoBuffer, float *extraParams, int *zShapeInfo) {
    // extraParams[0] - number of dimensions
    // extraParams[1] - dimension
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0)
        return;

    int targetDimension = (int) extraParams[1];
    printf("Target dimension: [%i]\n", targetDimension);
    int targetWidth = shape::shapeOf(xShapeInfoBuffer)[targetDimension];
    printf("Target rank: [%i]\n", targetWidth);
}
/**
 * One-hot fill over a flat buffer: dx[idx] = 1.0, every other element 0.0,
 * iterated with a grid-stride loop so any launch geometry covers `length`.
 */
template <typename T>
__device__ void fillIsMaxGeneric(T *dx, long length, long idx) {
    const long first = blockIdx.x * blockDim.x + threadIdx.x;
    const long step  = blockDim.x * gridDim.x;

    for (long i = first; i < length; i += step) {
        dx[i] = (i == idx ? 1.0 : 0.0);
    }
}
/* Per-dtype kernel wrappers around the one-hot fill above. */
extern "C" __global__ void fillIsMaxFloat(float *dx, long length, long idx) {
    fillIsMaxGeneric<float>(dx, length, idx);
}

extern "C" __global__ void fillIsMaxDouble(double *dx, long length, long idx) {
    fillIsMaxGeneric<double>(dx, length, idx);
}

extern "C" __global__ void fillIsMaxHalf(float16 *dx, long length, long idx) {
    fillIsMaxGeneric<float16>(dx, length, idx);
}
/**
 * Per-TAD one-hot fill: for each tensor-along-dimension of dZ, set element
 * dX[r] (the argmax index for that TAD, passed in as a value) to 1 and all
 * others to 0.  One block per TAD (blocks stride over numTads); threads
 * stride over elements within the TAD.  TAD geometry is computed once by
 * thread 0 into shared memory.
 */
template <typename T>
__device__ void fillDimensionalIsMaxGeneric(T *dX, int *xShapeInfo, T *dZ, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) {

    __shared__ int tadLength;
    __shared__ int tadEWS;
    __shared__ int numTads;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int tadRank;
    __shared__ char tadOrder;

    if (threadIdx.x == 0) {
        tadLength = shape::tadLength(zShapeInfo, dimension, dimensionLength);
        tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
        numTads = shape::length(zShapeInfo) / tadLength;

        tadShape = shape::shapeOf(tadOnlyShapeInfo);
        tadStride = shape::stride(tadOnlyShapeInfo);
        tadRank = shape::rank(tadOnlyShapeInfo);
        tadOrder = shape::order(tadOnlyShapeInfo);
    }
    __syncthreads();

    for (int r = blockIdx.x; r < numTads; r+= gridDim.x) {
        int tadOffsetForBlock = tadOffsets[r];
        // argmax position within this TAD, stored as a T value in dX
        int highestElement = (int) dX[r];

        if (dimensionLength > 1 || tadEWS < 1) {
            // general path: coordinate arithmetic per element
            int xCoord[MAX_RANK];

            for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
                shape::ind2subC(tadRank,tadShape, e, xCoord);

                Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
                dZ[xOffset] = (e == highestElement? (T) 1.0f : (T) 0.0f);
            }
        } else {
            // fast path: TAD has a usable elementWiseStride
            for (int e = threadIdx.x; e < tadLength; e += blockDim.x) {
                // so, we just set dZ[e] for each TAD. Sure, e should be replaced with
                int idx = tadOffsetForBlock + (e * tadEWS);
                dZ[idx] = (e == highestElement? (T) 1.0f : (T) 0.0f);
            }
        }
    }
}
// Type-specific kernel entry points for fillDimensionalIsMaxGeneric.
extern "C" __global__ void fillDimensionalIsMaxFloat(float *dx, int *xShapeInfo, float *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) {
fillDimensionalIsMaxGeneric<float>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets);
}
extern "C" __global__ void fillDimensionalIsMaxDouble(double *dx, int *xShapeInfo, double *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) {
fillDimensionalIsMaxGeneric<double>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets);
}
extern "C" __global__ void fillDimensionalIsMaxHalf(float16 *dx, int *xShapeInfo, float16 *dz, int *zShapeInfo, int *tadOnlyShapeInfo, int *dimension, int dimensionLength, Nd4jIndex *tadOffsets) {
fillDimensionalIsMaxGeneric<float16>(dx, xShapeInfo, dz, zShapeInfo, tadOnlyShapeInfo, dimension, dimensionLength, tadOffsets);
}
// Concatenates numArrays device arrays into `result` along `dimension`.
// Two regimes: a vector fast path (result is a vector) and a TAD-based
// matrix path. zTadShape/zOffsets describe the TADs of the result.
template <typename T>
__device__ void concatKernelGeneric(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfos,
T *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int zRank = shape::rank(resultShapeInfo);
// Reinterpret the opaque pointer arrays into their typed forms.
T **dataT = (T **) data;
int **shapeInfoPointers = (int **) inputShapeInfos;
int **tadShapes = (int **) tadPointers;
Nd4jIndex **tadOffsets = (Nd4jIndex **) offsetPointers;
//__shared__ int tDim[1];
__shared__ int baseIdx;
__shared__ int yLength;
__shared__ char yOrder;
__shared__ int yEWS;
char zOrder = shape::order(resultShapeInfo);
int zEWS = shape::elementWiseStride(resultShapeInfo);
int tadEWS = shape::elementWiseStride(zTadShape);
int zLength = shape::length(resultShapeInfo);
__shared__ int arrOffset;
__shared__ int numTads;
// Fast path: the result is a vector, so each input is copied to a
// contiguous segment. One block per input array.
if (shape::isVector(resultShapeInfo)) {
//if (threadIdx.x == 0)
//printf("Vector here\n");
if (zEWS >= 1) {
for (int r = blockIdx.x; r < numArrays; r += gridDim.x) {
if(shape::isVector(shapeInfoPointers[r]) || shape::order(shapeInfoPointers[r]) == shape::order(resultShapeInfo)) {
// NOTE(review): yLength/yEWS are __shared__ but written here by every
// thread without synchronization; all threads write the same value,
// so this appears benign — confirm against the CUDA memory model.
yLength = shape::length(shapeInfoPointers[r]);
yEWS = shape::elementWiseStride(shapeInfoPointers[r]);
// FIXME: this is bad
// (this inner __shared__ baseIdx shadows the one declared above)
__shared__ int baseIdx;
if (threadIdx.x == 0) {
// Segment start = total length of all preceding inputs.
baseIdx = 0;
for (int f = 0; f < r; f++) {
baseIdx += shape::length(shapeInfoPointers[f]);
}
}
__syncthreads();
for (int i = threadIdx.x; i < yLength && baseIdx + i < zLength; i += blockDim.x) {
result[baseIdx + i * zEWS] = dataT[r][i * yEWS];
}
__syncthreads();
} else {
if (tid == 0)
printf("Non-matched order for vector\n");
}
}
} else {
if (tid == 0)
printf("Vector Non-1 zEWS\n");
}
return;
}
// TODO: to be pulled into separate kernel. matrix concatenation
// General path: iterate inputs serially; within each input, copy its TADs
// into the matching TADs of the result.
for (int r = 0; r < numArrays; r ++) {
int *currentShape = shapeInfoPointers[r];
T *currentData = dataT[r];
int *currentTad = tadShapes[r];
Nd4jIndex *currentOffsets = tadOffsets[r];
if (threadIdx.x == 0) {
yLength = shape::length(currentTad);
yOrder = shape::order(currentTad);
yEWS = shape::elementWiseStride(currentTad);
numTads = shape::length(currentShape) / yLength;
// arrOffset = linear offset of this input inside each result TAD,
// accumulated from the TAD lengths of the preceding inputs.
arrOffset = 0;
for (int f = 0; f < r; f++) {
arrOffset += shape::length(tadShapes[f]);
}
}
__syncthreads();
if (yLength == 1) {
// edge case, each thread will handle it's own tad then
for (int j = tid; j < numTads; j += blockDim.x * gridDim.x) {
Nd4jIndex inputOffset = currentOffsets[j];
Nd4jIndex resultOffset = zOffsets[j];
T *dataTAD = currentData + inputOffset;
T *resultTAD = result + resultOffset;
int sub[MAX_RANK];
if (shape::order(zTadShape) == 'f') {
shape::ind2sub(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub);
} else {
shape::ind2subC(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub);
}
Nd4jIndex baseOffset = shape::getOffset(0,shape::shapeOf(zTadShape),shape::stride(zTadShape), sub, shape::rank(zTadShape));
resultTAD += baseOffset;
int yRank = shape::rank(currentTad);
int tadRank = shape::rank(zTadShape);
shape::ind2subC(yRank, shape::shapeOf(currentTad), 0,sub);
Nd4jIndex yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), sub, yRank);
resultOffset = shape::getOffset(0, shape::shapeOf(zTadShape), shape::stride(zTadShape), sub, tadRank);
resultTAD[resultOffset] = dataTAD[yOffset];
}
} else {
// One block per TAD of the current input.
for (int j = blockIdx.x; j < numTads; j += gridDim.x) {
Nd4jIndex inputOffset = currentOffsets[j];
Nd4jIndex resultOffset = zOffsets[j];
T *dataTAD = currentData + inputOffset;
T *resultTAD = result + resultOffset;
int sub[MAX_RANK];
shape::ind2subC(shape::rank(zTadShape),shape::shapeOf(zTadShape),arrOffset, sub);
Nd4jIndex baseOffset = shape::getOffset(0,shape::shapeOf(zTadShape),shape::stride(zTadShape), sub, shape::rank(zTadShape));
resultTAD += baseOffset;
if (zOrder == yOrder && yEWS > 0 && tadEWS > 0) {
//if (threadIdx.x == 0)
// printf("Branch A\n");
// Branch A: both sides have usable strides and matching order.
for (int i = threadIdx.x; i < yLength; i += blockDim.x) {
resultTAD[i * tadEWS] = dataTAD[i * yEWS];
}
} else {
if(tadEWS > 0 && shape::order(resultShapeInfo) == shape::order(currentTad)) {
//if (threadIdx.x == 0)
// printf("Branch B\n");
// Branch B: result TAD is strided; input addressed via coordinates.
if (threadIdx.x == 0) {
baseIdx = 0;
for (int f = 0; f < r; f++) {
baseIdx += shape::length(shapeInfoPointers[f]);
}
//printf("R: %i; baseIdx: %i;\n", baseIdx);
}
__syncthreads();
if (numTads == 1) {
for(int k = threadIdx.x; k < yLength; k+= blockDim.x) {
resultTAD[baseIdx + k * tadEWS] = dataTAD[k];
}
} else {
int yIdx[MAX_RANK];
int yRank = shape::rank(currentTad);
for (int i = threadIdx.x; i < yLength; i+= blockDim.x) {
shape::ind2subC(yRank, shape::shapeOf(currentTad), i, yIdx);
int yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), yIdx, yRank);
resultTAD[baseIdx + i * tadEWS] = dataTAD[yOffset];
}
}
__syncthreads();
} else {
//if (threadIdx.x == 0)
// printf("Branch C; yLength: %i;\n", yLength);
// Branch C: fully coordinate-based addressing on both sides.
int yIdx[MAX_RANK];
int yRank = shape::rank(currentTad);
int tadRank = shape::rank(zTadShape);
for (int i = threadIdx.x; i < yLength; i+= blockDim.x) {
shape::ind2subC(yRank, shape::shapeOf(currentTad), i,yIdx);
int yOffset = shape::getOffset(0, shape::shapeOf(currentTad), shape::stride(currentTad), yIdx, yRank);
int resultOffset = shape::getOffset(0, shape::shapeOf(zTadShape), shape::stride(zTadShape), yIdx, tadRank);
resultTAD[resultOffset] = dataTAD[yOffset];
}
}
}
__syncthreads();
}
}
__syncthreads();
}
}
template <typename T>
__device__ void concatKernelScalarGeneric(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfos,
T *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    // Concatenation of scalars: result[i] receives the single element of
    // the i-th input array. Grid-stride loop over the array list.
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    T **input = (T **) data;
    for (int arr = start; arr < numArrays; arr += step) {
        result[arr] = input[arr][0];
    }
}
// Type-specific kernel entry points for concatKernelScalarGeneric.
extern "C" __global__ void concatKernelScalarFloat(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelScalarGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelScalarHalf(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelScalarGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelScalarDouble(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelScalarGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
template <typename T>
__device__ void concatKernelHStackGeneric(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfos,
T *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    // Horizontal stack: every input is a vector and the result is a 2D
    // matrix. Input lengths may differ, so each block first determines the
    // running offset of its array inside the result before copying.
    int **inputShapes = (int**) inputShapeInfos;
    T **input = (T **) data;

    __shared__ int inputEWS;
    __shared__ int resultEWS;
    __shared__ int inputLength;

    if (threadIdx.x == 0) {
        resultEWS = shape::elementWiseStride(resultShapeInfo);
    }
    __syncthreads();

    // one block per input array, blocks stride over the grid
    for (int arr = blockIdx.x; arr < numArrays; arr += gridDim.x) {
        __shared__ int baseIdx;
        if (threadIdx.x == 0) {
            // offset of this array = sum of all preceding array lengths
            baseIdx = 0;
            for (int prev = 0; prev < arr; prev++) {
                baseIdx += shape::length(inputShapes[prev]);
            }
        }
        __syncthreads();

        T *inputData = (T *) input[arr];
        if (threadIdx.x == 0) {
            inputEWS = shape::elementWiseStride(inputShapes[arr]);
            inputLength = shape::length(inputShapes[arr]);
        }
        __syncthreads();

        for (int e = threadIdx.x; e < inputLength; e += blockDim.x) {
            result[baseIdx + e * resultEWS] = inputData[e * inputEWS];
        }
        __syncthreads();
    }
}
// Type-specific kernel entry points for concatKernelHStackGeneric.
extern "C" __global__ void concatKernelHStackFloat(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelHStackGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelHStackDouble(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelHStackGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelHStackHalf(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelHStackGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
template <typename T>
__device__ void concatKernelVStackGeneric(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfos,
T *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
    /*
    this is special case for concat: we group bunch of vectors into 2D matrix
    also: we expect each inputShapeInfo to have EWS, be a vector, and have equal size
    */
    int **inputShapes = (int**) inputShapeInfos;
    T **input = (T **) data;

    __shared__ int inputEWS;
    __shared__ int resultEWS;
    __shared__ int inputLength;

    // All inputs share the same geometry, so probe only the first one.
    if (threadIdx.x == 0) {
        inputLength = shape::length(inputShapes[0]);
        inputEWS = shape::elementWiseStride(inputShapes[0]);
        resultEWS = shape::elementWiseStride(resultShapeInfo);
    }
    __syncthreads();

    // one block per input row, blocks stride over the grid
    for (int row = blockIdx.x; row < numArrays; row += gridDim.x) {
        const int rowStart = row * inputLength * resultEWS;
        T *inputData = (T *) input[row];
        for (int e = threadIdx.x; e < inputLength; e += blockDim.x) {
            result[rowStart + e * resultEWS] = inputData[e * inputEWS];
        }
    }
}
// Type-specific kernel entry points for concatKernelVStackGeneric.
extern "C" __global__ void concatKernelVStackFloat(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelVStackGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelVStackDouble(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelVStackGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
extern "C" __global__ void concatKernelVStackHalf(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
concatKernelVStackGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers);
}
// Type-specific kernel entry points for the general concatKernelGeneric.
extern "C" __global__ void concatKernelDouble(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
double *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
concatKernelGeneric<double>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}
extern "C" __global__ void concatKernelFloat(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
concatKernelGeneric<float>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}
extern "C" __global__ void concatKernelHalf(int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
float16 *result,
int *resultShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers, int *zTadShape, Nd4jIndex *zOffsets) {
concatKernelGeneric<float16>(dimension, numArrays, data, inputShapeInfo, result, resultShapeInfo, tadPointers, offsetPointers, zTadShape, zOffsets);
}
template <typename T>
__device__ void pullRowsKernelGeneric(T *x,
int *xShapeInfo,
T *z,
int *zShapeInfo,
int n,
int *indexes,
int *tadShapeInfo,
Nd4jIndex *tadOffsets,
int *zTadShapeInfo,
Nd4jIndex *zTadOffsets) {
    // Copies n TADs ("rows") of x, selected by `indexes`, into consecutive
    // TADs of z. One block per row; threads stride over the row elements.
    const int srcEWS = shape::elementWiseStride(tadShapeInfo);
    const int dstEWS = shape::elementWiseStride(zTadShapeInfo);
    const int rowLength = shape::length(tadShapeInfo);

    for (int row = blockIdx.x; row < n; row += gridDim.x) {
        T *src = x + tadOffsets[indexes[row]];
        T *dst = z + zTadOffsets[row];
        for (int e = threadIdx.x; e < rowLength; e += blockDim.x) {
            dst[e * dstEWS] = src[e * srcEWS];
        }
    }
}
// Type-specific kernel entry points for pullRowsKernelGeneric.
extern "C" __global__ void pullRowsKernelHalf(
float16 *x,
int *xShapeInfo,
float16 *z,
int *zShapeInfo,
int n,
int *indexes,
int *tadShapeInfo,
Nd4jIndex *tadOffsets,
int *zTadShapeInfo,
Nd4jIndex *zTadOffsets) {
pullRowsKernelGeneric<float16>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}
extern "C" __global__ void pullRowsKernelFloat(float *x,
int *xShapeInfo,
float *z,
int *zShapeInfo,
int n,
int *indexes,
int *tadShapeInfo,
Nd4jIndex *tadOffsets,
int *zTadShapeInfo,
Nd4jIndex *zTadOffsets) {
pullRowsKernelGeneric<float>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}
extern "C" __global__ void pullRowsKernelDouble(double *x,
int *xShapeInfo,
double *z,
int *zShapeInfo,
int n,
int *indexes,
int *tadShapeInfo,
Nd4jIndex *tadOffsets,
int *zTadShapeInfo,
Nd4jIndex *zTadOffsets) {
pullRowsKernelGeneric<double>(x, xShapeInfo, z, zShapeInfo, n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets);
}
template <typename T>
__device__ void convertToHalfGeneric(T *dx, int n, half *dz) {
    // Casts n elements of dx to half precision into dz (grid-stride loop).
    // BUGFIX: the global thread id was computed as
    // threadIdx.x + blockIdx.x * gridDim.x, which maps distinct threads to
    // the same index whenever blockDim.x != gridDim.x, skipping some
    // elements and converting others twice; blockDim.x is the correct
    // multiplier.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (Nd4jIndex i = tid; i < n; i += blockDim.x * gridDim.x ) {
        dz[i] = __float2half((float) dx[i]);
    }
}
// Type-specific kernel entry points for convertToHalfGeneric.
extern "C" __global__ void kernelFloatsToHalfs(float *dx, int n, half *dz) {
convertToHalfGeneric<float>(dx, n, dz);
}
extern "C" __global__ void kernelDoublesToHalfs(double *dx, int n, half *dz) {
convertToHalfGeneric<double>(dx, n, dz);
}
template <typename T>
__device__ void convertHalfsToGeneric(half *dx, int n, T *dz) {
    // Widens n half-precision elements of dx into dz (grid-stride loop).
    // BUGFIX: the global thread id was computed as
    // threadIdx.x + blockIdx.x * gridDim.x, which maps distinct threads to
    // the same index whenever blockDim.x != gridDim.x, skipping some
    // elements and converting others twice; blockDim.x is the correct
    // multiplier.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (Nd4jIndex i = tid; i < n; i += blockDim.x * gridDim.x ) {
        dz[i] = (T) __half2float(dx[i]);
    }
}
// Type-specific kernel entry points for convertHalfsToGeneric.
extern "C" __global__ void kernelHalfsToDoubles(half *dx, int n, double *dz) {
convertHalfsToGeneric<double>(dx, n, dz);
}
extern "C" __global__ void kernelHalfsToFloats(half *dx, int n, float *dz) {
convertHalfsToGeneric<float>(dx, n, dz);
}
/**
 * Element-wise sum of n arrays into z.
 *
 * @tparam T element type
 * @param x array of n device pointers to the input buffers
 * @param z output buffer receiving the element-wise sums
 * @param n number of input arrays
 * @param length number of elements per array
 */
template<typename T>
__device__ void accumulateKernelGeneric(T **x, T *z, int n, const Nd4jIndex length) {
    // dynamically-sized shared scratch: one accumulator slot per thread
    __shared__ T *shmem;
    if (threadIdx.x == 0) {
        extern __shared__ unsigned char sharedmem[];
        shmem = (T *) sharedmem;
    }
    __syncthreads();

    // each block owns chunks of blockDim.x consecutive elements
    for (int base = blockDim.x * blockIdx.x; base < length; base += blockDim.x * gridDim.x) {
        shmem[threadIdx.x] = 0.0f;

        // fold the same element position across all n arrays
        for (int ar = 0; ar < n; ar++) {
            T *cdata = (T *) x[ar];
            cdata += base;
            if (base + threadIdx.x < length)
                shmem[threadIdx.x] += cdata[threadIdx.x];
        }

        // write the accumulated values back out
        T *wdata = z + base;
        if (base + threadIdx.x < length) {
            wdata[threadIdx.x] = shmem[threadIdx.x];
        }
    }
}
// Type-specific kernel entry points for accumulateKernelGeneric.
extern "C" __global__ void accumulateKernelHalf(float16 **dx, float16 *dz, int n, Nd4jIndex length) {
accumulateKernelGeneric<float16>(dx, dz, n, length);
}
extern "C" __global__ void accumulateKernelFloat(float **dx, float *dz, int n, Nd4jIndex length) {
accumulateKernelGeneric<float>(dx, dz, n, length);
}
extern "C" __global__ void accumulateKernelDouble(double **dx, double *dz, int n, Nd4jIndex length) {
accumulateKernelGeneric<double>(dx, dz, n, length);
}
// Element-wise average of n arrays. The result goes to dz (if non-null),
// and optionally back into every input array when `propagate` is set.
template <typename T>
__device__ void averagingKernelGeneric(T **dx, T *dz, int n, Nd4jIndex length, bool propagate) {
// dynamically-sized shared scratch: one accumulator slot per thread
__shared__ T *shmem;
if (threadIdx.x == 0) {
extern __shared__ unsigned char sharedmem[];
shmem = (T *) sharedmem;
}
__syncthreads();
// each block cycles over it's own part of arrays
for (int r = blockDim.x * blockIdx.x; r < length; r += blockDim.x * gridDim.x) {
shmem[threadIdx.x] = (T) 0.0f;
Nd4jIndex baseIdx = r;
// aggregation step, we roll over all arrays
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *) dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length)
shmem[threadIdx.x] += cdata[threadIdx.x];
}
// average data in shared memory
if (baseIdx + threadIdx.x < length)
shmem[threadIdx.x] /= n;
// div step & write out step
if (dz != nullptr) {
T *wdata = dz + baseIdx;
if (baseIdx + threadIdx.x < length) {
wdata[threadIdx.x] = shmem[threadIdx.x];
}
}
// propagate averaged data to all arrays
if (propagate)
for (int ar = 0; ar < n; ar++) {
T *cdata = (T *) dx[ar];
cdata += baseIdx;
if (baseIdx + threadIdx.x < length)
cdata[threadIdx.x] = shmem[threadIdx.x];
}
}
}
// Type-specific kernel entry points for averagingKernelGeneric.
extern "C" __global__ void averagingKernelHalf(float16 **dx, float16 *dz, int n, Nd4jIndex length, bool propagate) {
averagingKernelGeneric<float16>(dx, dz, n, length, propagate);
}
extern "C" __global__ void averagingKernelFloat(float **dx, float *dz, int n, Nd4jIndex length, bool propagate) {
averagingKernelGeneric<float>(dx, dz, n, length, propagate);
}
extern "C" __global__ void averagingKernelDouble(double **dx, double *dz, int n, Nd4jIndex length, bool propagate) {
averagingKernelGeneric<double>(dx, dz, n, length, propagate);
}
template<typename T>
__device__ void tearKernelGeneric(T *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
    // "Tears" x apart along its TADs: TAD r of x is copied into the
    // standalone buffer targets[r] (shape described by zShapeInfo).
    // One block per TAD, blocks stride over the grid.
    __shared__ Nd4jIndex tadLength;
    __shared__ int tadEWS;
    __shared__ int zEWS;
    __shared__ int tadRank;
    __shared__ Nd4jIndex numTads;
    __shared__ int zRank;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int *zShape;
    __shared__ int *zStride;
    if (threadIdx.x == 0) {
        tadLength = shape::length(tadShapeInfo);
        tadEWS = shape::elementWiseStride(tadShapeInfo);
        zEWS = shape::elementWiseStride(zShapeInfo);
        tadRank = shape::rank(tadShapeInfo);
        numTads = shape::length(xShapeInfo) / tadLength;
        zRank = shape::rank(zShapeInfo);
        tadShape = shape::shapeOf(tadShapeInfo);
        tadStride = shape::stride(tadShapeInfo);
        zShape = shape::shapeOf(zShapeInfo);
        zStride = shape::stride(zShapeInfo);
    }
    __syncthreads();

    for (Nd4jIndex r = blockIdx.x; r < numTads; r += gridDim.x) {
        T *z = (T *) targets[r];
        T *s = x + tadOffsets[r];
        if (zEWS > 0 && tadEWS > 0) {
            // fast path: both sides strided
            for (Nd4jIndex i = threadIdx.x; i < tadLength; i += blockDim.x) {
                z[i * zEWS] = s[i * tadEWS];
            }
        } else {
            // coordinate-based fallback.
            // FIX: this loop previously started at j = 0 with stride 1 in
            // every thread, so all blockDim.x threads performed the whole
            // copy redundantly; stride it over the threads of the block
            // like the fast path above (writes are per-element independent).
            int xCoord[MAX_RANK];
            int zCoord[MAX_RANK];
            for (Nd4jIndex j = threadIdx.x; j < tadLength; j += blockDim.x) {
                shape::ind2sub(tadRank,tadShape, j, xCoord);
                shape::ind2sub(zRank, zShape, j, zCoord);
                Nd4jIndex xOffset = shape::getOffset(0, tadShape, tadStride, xCoord, tadRank);
                Nd4jIndex zOffset = shape::getOffset(0, zShape, zStride, zCoord, zRank);
                z[zOffset] = s[xOffset];
            }
        }
    }
}
// Type-specific kernel entry points for tearKernelGeneric.
extern "C" __global__ void tearKernelDouble(double *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
tearKernelGeneric<double>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}
extern "C" __global__ void tearKernelFloat(float *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
tearKernelGeneric<float>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}
extern "C" __global__ void tearKernelHalf(float16 *x, int *xShapeInfo, Nd4jPointer *targets, int *zShapeInfo, int *tadShapeInfo, Nd4jIndex *tadOffsets) {
tearKernelGeneric<float16>(x, xShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets);
}
template<typename T>
__device__ void shuffleKernelGeneric(T **dX, int **xShapeInfo, T **dZ, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
    // Shuffles TADs within each of the N arrays: TAD r is exchanged with
    // TAD shuffleMap[r]; negative map entries are left untouched.
    // we assume that shuffle map for each X contains pair TAD Y
    __shared__ int tadLength;
    __shared__ int tadEWS;
    __shared__ int tadRank;
    __shared__ int numTads;
    __shared__ int *tadShape;
    __shared__ int *tadStride;
    __shared__ int yStride;

    for (int f = 0; f < N; f++) {
        T *x = (T *) dX[f];
        T *z = (T *) dZ[f];

        __syncthreads();
        if (threadIdx.x == 0) {
            tadLength = shape::length(tadOnlyShapeInfo[f]);
            tadEWS = shape::elementWiseStride(tadOnlyShapeInfo[f]);
            tadRank = shape::rank(tadOnlyShapeInfo[f]);
            numTads = shape::length(xShapeInfo[f]) / tadLength;
            tadShape = shape::shapeOf(tadOnlyShapeInfo[f]);
            tadStride = shape::stride(tadOnlyShapeInfo[f]);
        }
        __syncthreads();

        // we roll over the pairs of TADs, thus limit is numTads / 2
        // BUGFIX: this block-strided loop advanced by blockDim.x instead of
        // gridDim.x; with blockDim != gridDim some TADs were skipped and
        // others swapped more than once (a repeated swap un-does itself).
        // Sibling kernels in this file stride block loops by gridDim.x.
        for (Nd4jIndex r = blockIdx.x; r < numTads; r += gridDim.x) {
            if (shuffleMap[r] < 0)
                continue;

            Nd4jIndex oldOffset = tadOffsets[f][r];
            Nd4jIndex newOffset = tadOffsets[f][shuffleMap[r]];

            T *rX = x + oldOffset;
            T *rY = x + newOffset;
            T *zX = z + oldOffset;
            T *zY = z + newOffset;

            // so we're going to change TAD[oldOffset] with TAD[newOffset]
            if (tadEWS == 1) {
                // NOTE(review): this branch writes into rX (the input) and
                // zY (the output) — it only matches the coordinate branch
                // below when x and z alias; confirm against callers.
                for (Nd4jIndex i = threadIdx.x; i < tadLength; i += blockDim.x) {
                    T oldX = rX[i];
                    rX[i] = rY[i];
                    zY[i] = oldX;
                }
            } else {
                // well have to iterate using ind2sub
                int xCoord[MAX_RANK];
                int yCoord[MAX_RANK];
                for (Nd4jIndex i = threadIdx.x; i < tadLength; i+= blockDim.x) {
                    shape::ind2subC(tadRank,tadShape, i, xCoord);
                    shape::ind2subC(tadRank,tadShape, i, yCoord);
                    Nd4jIndex xOffset = shape::getOffset(oldOffset, tadShape, tadStride, xCoord, tadRank);
                    Nd4jIndex yOffset = shape::getOffset(newOffset, tadShape, tadStride, yCoord, tadRank);
                    T oldX = x[xOffset];
                    z[xOffset] = x[yOffset];
                    z[yOffset] = oldX;
                }
            }
        }
    }
}
// Type-specific kernel entry points for shuffleKernelGeneric.
extern "C" __global__ void shuffleKernelDouble(double **x, int **xShapeInfo, double **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
shuffleKernelGeneric<double>(x, xShapeInfo, z, zShapeInfo, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
}
extern "C" __global__ void shuffleKernelFloat(float **x, int **xShapeInfo, float **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
shuffleKernelGeneric<float>(x, xShapeInfo, z, zShapeInfo, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
}
extern "C" __global__ void shuffleKernelHalf(float16 **x, int **xShapeInfo, float16 **z, int **zShapeInfo, int N, int *shuffleMap, int **tadOnlyShapeInfo, Nd4jIndex **tadOffsets) {
shuffleKernelGeneric<float16>(x, xShapeInfo, z, zShapeInfo, N, shuffleMap, tadOnlyShapeInfo, tadOffsets);
}
// Macro-generated kernel instantiations: one transformSimpleGeneric kernel
// per op in TRANSFORM_OPS, for each element type.
// transform strided
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float, INPUT(Nd4jIndex n, float *x, int xStride, float *extraParams, float *z, int zStride, int *allocationPointer, float *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, double, INPUT(Nd4jIndex n, double *x, int xStride, double *extraParams, double *z, int zStride, int *allocationPointer, double *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformStrided_, transformSimpleGeneric, float16, INPUT(Nd4jIndex n, float16 *x, int xStride, float16 *extraParams, float16 *z, int zStride, int *allocationPointer, float16 *reductionPointer), PARAMS(n, x, xStride, extraParams, z, zStride, allocationPointer, reductionPointer), OPS_A(TRANSFORM_OPS))
// transform shaped
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float, INPUT(float *x, int *xShape, int xRank, float *extraParams, float *z, int *zShape, int zRank, int *allocationPointer, float *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, double, INPUT(double *x, int *xShape, int xRank, double *extraParams, double *z, int *zShape, int zRank, int *allocationPointer, double *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))
DISPATCH_KERNEL_SIMPLE(transformShaped_, transformSimpleGeneric, float16, INPUT(float16 *x, int *xShape, int xRank, float16 *extraParams, float16 *z, int *zShape, int zRank, int *allocationPointer, float16 *reductionPointer, int *tadShapeInfo, Nd4jIndex *tadOffsets), PARAMS(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), OPS_A(TRANSFORM_OPS))
#endif
#endif /* TRANSFORM_H_ */
|
progressive_kd_tree_index.h | #ifndef panene_progressive_kd_tree_index_h
#define panene_progressive_kd_tree_index_h
#include <vector>
#include <algorithm>
#include <random>
#include <cstring>
#include <cstdio>
#include <iostream>
#include <queue>
#include <cassert>
#include <map>
#include <kd_tree_index.h>
#ifdef BENCHMARK
#include <util/timer.h>
#define BENCH(x) x
#else
#define BENCH(x) ((void)0)
#endif
namespace panene
{
// Lifecycle of the background tree rebuild driven by update().
enum UpdateStatus {
// no rebuild in progress
NoUpdate,
// splitting nodes of the replacement tree from the queued NodeSplits
BuildingTree,
// replacement tree built; inserting points that arrived meanwhile
InsertingPoints
};
// Relative share of each run() budget given to point insertion vs. index
// rebuilding.
struct TreeWeight {
    float addPointWeight;     // fraction of ops spent adding points
    float updateIndexWeight;  // fraction of ops spent rebuilding the index
    TreeWeight(float addPointWeight_, float updateIndexWeight_)
        : addPointWeight(addPointWeight_),
          updateIndexWeight(updateIndexWeight_) {}
};
// Outcome of a single ProgressiveKDTreeIndex::run call: the operation
// budgets, the work actually performed per phase, and (BENCH builds only)
// the elapsed wall time of each phase.
struct UpdateResult2 {
    size_t numPointsInserted;   // points in the index after the call
    size_t addPointOps;         // ops budgeted for point insertion
    size_t updateIndexOps;      // ops budgeted for index rebuilding
    size_t addPointResult;      // insertion ops actually performed
    size_t updateIndexResult;   // rebuild ops actually performed
    double addPointElapsed;     // insertion phase wall time
    double updateIndexElapsed;  // rebuild phase wall time

    UpdateResult2() = default;

    UpdateResult2(
        size_t addPointOps_, size_t updateIndexOps_,
        size_t addPointResult_, size_t updateIndexResult_,
        size_t numPointsInserted_,
        double addPointElapsed_, double updateIndexElapsed_)
        : numPointsInserted(numPointsInserted_)
        , addPointOps(addPointOps_)
        , updateIndexOps(updateIndexOps_)
        , addPointResult(addPointResult_)
        , updateIndexResult(updateIndexResult_)
        , addPointElapsed(addPointElapsed_)
        , updateIndexElapsed(updateIndexElapsed_) {
    }

    friend std::ostream& operator<<(std::ostream& os, const UpdateResult2& obj) {
        os << "UpdateResult2(addPointOps: " << obj.addPointResult << " / " << obj.addPointOps << ", "
           << "updateIndexOps: " << obj.updateIndexResult << " / " << obj.updateIndexOps << ", numPointsInserted: " << obj.numPointsInserted << ")";
        return os;
    }
};
template <typename DataSource>
class ProgressiveKDTreeIndex : public KDTreeIndex<DataSource>
{
USE_KDTREE_INDEX_SYMBOLS
typedef DataSource DataSourceT;
public:
// Builds a progressive index over dataSource_. `weight_` splits each run()
// budget between insertion and rebuilding; `reconstructionWeight_` scales
// the query-loss threshold that triggers a rebuild (see checkBeginUpdate).
ProgressiveKDTreeIndex(DataSource *dataSource_, IndexParams indexParams_, TreeWeight weight_ = TreeWeight(0.3, 0.7), const float reconstructionWeight_ = .25f) : KDTreeIndex<DataSource>(dataSource_, indexParams_, Distance()), weight(weight_), reconstructionWeight(reconstructionWeight_) {
}
// Inserts up to `ops` new points from the data source into every tree.
// Returns the number of points actually inserted (0 when the source is
// exhausted). The first call builds the index from scratch instead.
size_t addPoints(size_t ops) {
size_t oldSize = size;
size += ops;
// clamp to the number of points the data source actually holds
if (size > dataSource->size())
size = dataSource->size();
if (oldSize == 0) { // for the first time, build the index as we did in the non-progressive version.
buildIndex();
return ops;
}
else {
// insert each new point into all existing trees
for (size_t i = oldSize; i < size; ++i) {
for (size_t j = 0; j < numTrees; ++j) {
trees[j]->size++;
addPointToTree(trees[j], trees[j]->root, i, 0);
}
}
// a rebuilt tree in its insertion phase must also receive the points
// it has not seen yet (tracked by sizeAtUpdate)
if (updateStatus == UpdateStatus::InsertingPoints) {
for (size_t i = oldSize; i < size && sizeAtUpdate < size; ++i) {
ongoingTree->size++;
addPointToTree(ongoingTree, ongoingTree->root, sizeAtUpdate++, 0);
}
}
return size - oldSize;
}
}
// Starts an incremental rebuild: snapshots the current point ids in random
// order, allocates a fresh tree, and seeds the split queue with its root.
// The actual splitting work is performed later by update().
void beginUpdate() {
    updateStatus = UpdateStatus::BuildingTree;
    sizeAtUpdate = size;
    ids.resize(sizeAtUpdate);
    for (size_t i = 0; i < sizeAtUpdate; ++i) ids[i] = int(i);
    // BUGFIX: std::random_shuffle was deprecated in C++14 and removed in
    // C++17; use std::shuffle with an explicit URNG instead (<random> and
    // <algorithm> are already included by this header).
    std::shuffle(ids.begin(), ids.end(), std::mt19937(std::random_device()()));

    ongoingTree = new KDTree<NodePtr>(dataSource->capacity());
    ongoingTree->root = new(pool) Node(ongoingTree);

    // reset the split queue so it only holds work for the new tree
    std::queue<NodeSplit> empty;
    queue = empty;
    queue.push(NodeSplit(ongoingTree->root, &ids[0], sizeAtUpdate, 1));
    ongoingTree->size = sizeAtUpdate;
}
// Performs up to `ops` units of rebuild work (ops == -1 means unbounded):
// first drains the node-split queue, then inserts the points that arrived
// after the rebuild started, and finally swaps the finished tree in for
// the most imbalanced existing tree. Returns the work actually done.
size_t update(int ops) {
int updatedCount = 0;
// phase 1: split queued nodes of the tree under construction
while ((ops == -1 || updatedCount < ops) && !queue.empty()) {
NodeSplit nodeSplit = queue.front();
queue.pop();
#if DEBUG
std::cerr << "updatedCount " << updatedCount << std::endl;
#endif
NodePtr node = nodeSplit.node;
IDType *begin = nodeSplit.begin;
int count = nodeSplit.count;
int depth = nodeSplit.depth;
#if DEBUG
std::cerr << begin << " " << count << std::endl;
#endif
// At this point, nodeSplit the two children of nodeSplit are nullptr
if (count == 1) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->id = *begin; /* Store index of this vec. */ // TODO id of vec
ongoingTree->setInsertionLog(node->id, 1, depth);
}
else {
// interior node: pick a split feature/value and enqueue both halves
int idx;
int cutfeat;
DistanceType cutval;
meanSplit(begin, count, idx, cutfeat, cutval);
#if DEBUG
std::cerr << "cut index: " << idx << " cut count: " << count << std::endl;
#endif
node->divfeat = cutfeat;
node->divval = cutval;
node->child1 = new(pool) Node(ongoingTree);
node->child2 = new(pool) Node(ongoingTree);
queue.push(NodeSplit(node->child1, begin, idx, depth + 1));
queue.push(NodeSplit(node->child2, begin + idx, count - idx, depth + 1));
}
updatedCount += 1; // count; // std::min(1, count / 2);
}
// queue drained: move on to inserting the points added since beginUpdate()
if (updateStatus == UpdateStatus::BuildingTree && queue.empty()) {
updateStatus = UpdateStatus::InsertingPoints;
}
if (updateStatus == UpdateStatus::InsertingPoints) {
if (ongoingTree->size < size) {
// insert points from sizeAtUpdate to size
while (ongoingTree->size < size && (ops == -1 || updatedCount < ops)) {
ongoingTree->size++;
addPointToTree(ongoingTree, ongoingTree->root, sizeAtUpdate, 0);
sizeAtUpdate++;
updatedCount++;
}
}
if (ongoingTree->size >= size) {
// finished creating a new tree
ongoingTree->cost = ongoingTree->computeCost();
size_t victimId = 0;
float maxImbalance = trees[0]->computeImbalance();
// find the most unbalanced one
for (size_t i = 1; i < numTrees; ++i) {
float imbalance = trees[i]->computeImbalance();
if (maxImbalance < imbalance) {
maxImbalance = imbalance;
victimId = i;
}
}
// get the victim
auto victim = trees[victimId];
// replace the victim with the newly created tree
delete victim;
trees[victimId] = ongoingTree;
// reset the sizeAtUpdate
sizeAtUpdate = 0;
updateStatus = UpdateStatus::NoUpdate;
}
}
return updatedCount;
}
UpdateResult2 run(size_t ops) {
    // Spend `ops` units of work, split between inserting new points and
    // advancing the background index rebuild according to `weight`.
    // Returns per-phase op budgets, results, and (when BENCH is enabled)
    // elapsed times.
    size_t addPointOps = 0, updateIndexOps = 0;
    size_t addPointResult = 0, updateIndexResult = 0;
    double addPointElapsed = 0, updateIndexElapsed = 0;
    if (updateStatus != UpdateStatus::NoUpdate) {
        // A rebuild is in flight: split the budget by the configured weights.
        addPointOps = (size_t)(ops * weight.addPointWeight);
        updateIndexOps = (size_t)(ops * weight.updateIndexWeight);
    }
    else {
        addPointOps = ops;
    }
    BENCH(Timer timer);
    BENCH(timer.begin());
    if (addPointOps > 0) {
        addPointResult = addPoints(addPointOps);
    }
    // NOTE(review): addPointResult == 0 is also true when addPointOps == 0
    // (no insertion was even attempted) — the weight shift below then still
    // fires; confirm this is intended.
    if (addPointResult == 0) {
        // if we added all points, put all operations to update index
        weight.updateIndexWeight += weight.addPointWeight;
        weight.addPointWeight = 0;
        updateIndexOps = ops;
        addPointOps = 0;
    }
    size_t numPointsInserted = size;
    BENCH(addPointElapsed = timer.end());
    if (updateStatus != NoUpdate) {
        BENCH(timer.begin());
        updateIndexResult = update(updateIndexOps);
        BENCH(updateIndexElapsed = timer.end());
    }
    return UpdateResult2(
        addPointOps, updateIndexOps,
        addPointResult, updateIndexResult,
        numPointsInserted,
        addPointElapsed, updateIndexElapsed);
}
void checkBeginUpdate() {
    // Kick off a background rebuild only when none is in progress and the
    // accumulated query loss has outgrown the estimated rebuild cost
    // (size * log2(size), scaled by reconstructionWeight).
    if (updateStatus != UpdateStatus::NoUpdate) {
        return;
    }
    const float rebuildCost = (float)std::log2(size) * size;
    if (queryLoss <= rebuildCost * reconstructionWeight) {
        return;
    }
    beginUpdate();
    queryLoss = 0;
}
void knnSearch(
    const IDType &qid,
    ResultSet<IDType, DistanceType> &resultSet,
    size_t knn,
    const SearchParams& params)
{
    // Single-query k-NN search by stored id: fetch the vector, search, and
    // accumulate how far the traversal cost exceeded the ideal
    // (numTrees * log2(size)) into queryLoss, possibly triggering a rebuild.
    // NOTE(review): `knn` is unused in this overload; the capacity
    // presumably lives in resultSet — confirm against ResultSet.
    std::vector<ElementType> query(dim);
    dataSource->get(qid, query);
    float cost = findNeighbors(query, resultSet, params);
    size_t idealCost = std::log2(size);
    queryLoss += cost - numTrees * idealCost;
    checkBeginUpdate();
}
// this needs to be improved
void knnSearch(
    const std::vector<IDType> &qids,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params)
{
    // Batched k-NN search by stored ids: materialize the query vectors from
    // the data source, then delegate to the vector-based overload (which
    // parallelizes and updates the query-loss heuristic).
    // Fix: take `qids` by const reference — the original accepted
    // `const std::vector<IDType>` by value, copying the whole id list on
    // every call. Call sites are unaffected.
    std::vector<std::vector<ElementType>> vectors(qids.size());
    for (size_t i = 0; i < qids.size(); ++i) {
        vectors[i].resize(dim);
        dataSource->get(qids[i], vectors[i]);
    }
    knnSearch(vectors, resultSets, knn, params);
}
void knnSearch(
    const std::vector<std::vector<ElementType>> &vectors,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params)
{
    // Batched parallel k-NN search: one result set per query vector. The
    // summed traversal cost feeds the query-loss heuristic that may trigger
    // a background rebuild.
    resultSets.resize(vectors.size());
    float totalCost = 0;
#pragma omp parallel num_threads(params.cores)
    {
#pragma omp for schedule(static) reduction(+:totalCost)
        for (int qi = 0; qi < (int)vectors.size(); qi++) {
            resultSets[qi] = ResultSet<IDType, DistanceType>(knn);
            totalCost += findNeighbors(vectors[qi], resultSets[qi], params);
        }
    }
    queryLoss += totalCost;
    checkBeginUpdate();
}
// alias for knnSearch(points) since Cython does not seem to support method overloading
// alias for knnSearch(points) since Cython does not seem to support method overloading
// Forwards verbatim to the vector-based knnSearch overload.
void knnSearchVec(
    const std::vector<std::vector<ElementType>> &vectors,
    std::vector<ResultSet<IDType, DistanceType>> &resultSets,
    size_t knn,
    const SearchParams& params)
{
    knnSearch(vectors, resultSets, knn, params);
}
protected:
void buildIndex() {
    // Build all trees from scratch over ids [0, size): each tree is built
    // from an independent random permutation so the trees decorrelate.
    std::vector<IDType> ids(size);
    for (size_t i = 0; i < size; ++i) {
        ids[i] = IDType(i);
    }
    for (size_t i = 0; i < numTrees; ++i) {
        // NOTE(review): std::random_shuffle was deprecated in C++14 and
        // removed in C++17 — migrate to std::shuffle with an explicit URBG
        // (requires <random>).
        std::random_shuffle(ids.begin(), ids.end());
        trees[i]->root = divideTree(trees[i], &ids[0], size, 1);
        trees[i]->size = size;
        // Cache the cost so later rebuild decisions don't recompute it.
        trees[i]->cost = trees[i]->computeCost();
    }
}
// Insert point `id` into `tree` by descending to the leaf it belongs to and
// splitting that leaf into two children (old point and new point), choosing
// the split dimension with maximum span between the two points.
void addPointToTree(KDTree<NodePtr>* tree, NodePtr node, IDType id, int depth) {
    if ((node->child1 == NULL) && (node->child2 == NULL)) {
        // if leaf
        size_t nodeId = node->id;
        // Split along the dimension where the new and resident points differ most.
        size_t divfeat = dataSource->findDimWithMaxSpan(id, nodeId);
        NodePtr left = new(pool) Node(tree);
        left->child1 = left->child2 = NULL;
        NodePtr right = new(pool) Node(tree);
        right->child1 = right->child2 = NULL;
        ElementType pointValue = dataSource->get(id, divfeat);
        ElementType leafValue = dataSource->get(node->id, divfeat);
        if (pointValue < leafValue) {
            left->id = id;
            right->id = node->id;
        }
        else {
            left->id = node->id;
            right->id = id;
        }
        left->divfeat = right->divfeat = -1;
        node->divfeat = divfeat;
        // Split value: midpoint between the two points on the chosen dimension.
        node->divval = (pointValue + leafValue) / 2;
        node->child1 = left;
        node->child2 = right;
        // incrementally update imbalance
        tree->setInsertionLog(id, 0, depth + 2);
        tree->incrementFreqByOne(id);
        tree->incrementFreqAndDepthByOne(nodeId);
    }
    else {
        // Interior node: recurse into the side of the split the point falls on.
        if (dataSource->get(id, node->divfeat) < node->divval) {
            addPointToTree(tree, node->child1, id, depth + 1);
        }
        else {
            addPointToTree(tree, node->child2, id, depth + 1);
        }
    }
}
void freeIndex() {
for (size_t i = 0; i < numTrees; ++i) {
if (trees[i] != nullptr) trees[i]->~KDTree();
}
pool.free();
}
public:
float getMaxCachedCost() {
float cost = 0;
for (size_t i = 0; i < numTrees; ++i) {
if (cost < trees[i]->getCachedCost()) {
cost = trees[i]->getCachedCost();
}
}
return cost;
}
std::vector<float> getCachedImbalances() {
std::vector<float> imbalances;
for (size_t i = 0; i < numTrees; ++i) {
imbalances.push_back(trees[i]->getCachedImbalance());
}
return imbalances;
}
std::vector<float> recomputeImbalances() {
std::vector<float> imbalances;
for (size_t i = 0; i < numTrees; ++i) {
imbalances.push_back(trees[i]->computeImbalance());
}
return imbalances;
}
size_t computeMaxDepth() {
    // Deepest leaf over all trees (0 when there are no trees).
    size_t deepest = 0;
    for (size_t t = 0; t < numTrees; ++t) {
        const size_t d = trees[t]->computeMaxDepth();
        if (d > deepest) {
            deepest = d;
        }
    }
    return deepest;
}
// Debug helper: dump the state of the background rebuild (pending split jobs
// and how far the in-progress tree has grown) to stdout.
void printBackstage() {
    std::cout << "queue size: " << queue.size() << std::endl;
    std::cout << "ongoingTree size: " << ongoingTree->size << std::endl;
}
public:
// Phase of the incremental rebuild state machine.
UpdateStatus updateStatus = UpdateStatus::NoUpdate;
// Tree being rebuilt in the background; swapped in for the most imbalanced
// tree once complete.
KDTree<NodePtr>* ongoingTree;
// Accumulated search cost above the ideal; drives checkBeginUpdate().
float queryLoss = 0.0;
// Split of the per-run() op budget between point insertion and rebuilding.
TreeWeight weight;
private:
float reconstructionWeight; // lower => more update
// Number of points the current rebuild has consumed (0 when idle).
size_t sizeAtUpdate = 0;
// Pending node-split jobs for the breadth-first background build.
std::queue<NodeSplit> queue;
// Scratch id buffer used when starting a rebuild.
std::vector<size_t> ids;
};
}
#endif
|
core_dzamax.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> c d s
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include <math.h>
/******************************************************************************/
// Asynchronously computes, as an OpenMP task, the maximum absolute value
// (via plasma_core_dcabs1 — presumably the LAPACK dcabs1 convention
// |Re| + |Im|; confirm in its definition) over each column
// (PlasmaColumnwise -> values[0..n-1]) or each row (PlasmaRowwise ->
// values[0..m-1]) of the m-by-n matrix A with leading dimension lda.
// The work is skipped unless sequence->status == PlasmaSuccess; no new
// errors are raised here.
void plasma_core_omp_dzamax(int colrow, int m, int n,
                     const plasma_complex64_t *A, int lda,
                     double *values,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    switch (colrow) {
    case PlasmaColumnwise:
        // Task dependencies: reads the whole panel of A, writes n column maxima.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:n])
        {
            if (sequence->status == PlasmaSuccess) {
                for (int j = 0; j < n; j++) {
                    // Seed with the first row, then scan the rest of column j.
                    values[j] = plasma_core_dcabs1(A[lda*j]);
                    for (int i = 1; i < m; i++) {
                        double tmp = plasma_core_dcabs1(A[lda*j+i]);
                        if (tmp > values[j])
                            values[j] = tmp;
                    }
                }
            }
        }
        break;
    case PlasmaRowwise:
        // Task dependencies: reads the whole panel of A, writes m row maxima.
        #pragma omp task depend(in:A[0:lda*n]) \
                         depend(out:values[0:m])
        {
            if (sequence->status == PlasmaSuccess) {
                // Seed every row maximum from column 0, then sweep the
                // remaining columns (column-major friendly access order).
                for (int i = 0; i < m; i++)
                    values[i] = plasma_core_dcabs1(A[i]);
                for (int j = 1; j < n; j++) {
                    for (int i = 0; i < m; i++) {
                        double tmp = plasma_core_dcabs1(A[lda*j+i]);
                        if (tmp > values[i])
                            values[i] = tmp;
                    }
                }
            }
        }
        break;
    }
}
|
arrayStack.c | // -----------------------------------------------------------------------------
//
// "00_AccelGraph"
//
// -----------------------------------------------------------------------------
// Copyright (c) 2014-2019 All rights reserved
// -----------------------------------------------------------------------------
// Author : Abdullah Mughrabi
// Email : atmughra@ncsu.edu||atmughrabi@gmail.com
// File : arrayStack.c
// Create : 2019-06-21 17:15:17
// Revise : 2019-09-28 15:36:13
// Editor : Abdullah Mughrabi
// -----------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include "myMalloc.h"
#include "arrayStack.h"
#include "bitmap.h"
// Allocate an ArrayStack of the given capacity together with its backing
// storage and both frontier bitmaps (current and next window).
struct ArrayStack *newArrayStack(uint32_t size)
{
    struct ArrayStack *stack = (struct ArrayStack *) my_malloc( sizeof(struct ArrayStack));

    stack->size = size;
    stack->head = 0;
    stack->tail = 0;
    stack->tail_next = 0;

    stack->Stack = (uint32_t *) my_malloc(size * sizeof(uint32_t));
    stack->q_bitmap = newBitmap(size);
    stack->q_bitmap_next = newBitmap(size);

    return stack;
}
// Reset the stack to empty without freeing any storage.
// NOTE(review): only q_bitmap is cleared here; q_bitmap_next is left as-is.
// slideWindowArrayStackBitmap() clears q_bitmap_next after swapping, which
// may be why it is skipped — confirm callers never rely on a clean
// q_bitmap_next immediately after a reset.
void resetArrayStack(struct ArrayStack *q)
{
    q->head = 0;
    q->tail = 0;
    q->tail_next = 0;
    clearBitmap(q->q_bitmap);
}
// Release the bitmaps, the backing storage, and finally the container.
// Safe to call with a NULL stack or partially constructed members.
void freeArrayStack(struct ArrayStack *q)
{
    if(!q)
        return;

    if(q->q_bitmap_next)
        freeBitmap(q->q_bitmap_next);
    if(q->q_bitmap)
        freeBitmap(q->q_bitmap);
    if(q->Stack)
        free(q->Stack);

    free(q);
}
// Push k at the tail and keep tail_next in sync with tail.
// NOTE(review): this is the only push variant that wraps the tail with
// modulo size (circular use) — confirm callers never mix it with the
// non-wrapping variants on the same stack.
void pushArrayStack (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail] = k;
    q->tail = (q->tail + 1) % q->size;
    q->tail_next = q->tail;
}
// Push k at the tail and mark it in the current-window bitmap.
// tail is first synced to tail_next, then both advance together, so
// immediate and delayed pushes presumably interleave safely — confirm.
void pushArrayStackWithBitmap (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail] = k;
    setBit(q->q_bitmap, k);
    q->tail = q->tail_next;
    q->tail++;
    q->tail_next++;
}
// Thread-safe push: atomically reserve a slot at the tail, then fill it.
// tail_next is intentionally untouched here (see the delayed variants).
void pushArrayStackAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t local_q_tail = __sync_fetch_and_add(&q->tail, 1);
    q->Stack[local_q_tail] = k;
}
// Thread-safe push that also marks k in the current-window bitmap.
void pushArrayStackWithBitmapAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t local_q_tail = __sync_fetch_and_add(&q->tail, 1);
    q->Stack[local_q_tail] = k;
    setBitAtomic(q->q_bitmap, k);
}
// Delayed push: append into the *next* window ([tail, tail_next)); the
// element only becomes part of the current window after a slideWindow call.
void pushArrayStackDelayed (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail_next] = k;
    q->tail_next++;
}
// Delayed push that also marks k in the *next*-window bitmap, which
// slideWindowArrayStackBitmap later swaps into place.
void pushArrayStackDelayedWithBitmap (struct ArrayStack *q, uint32_t k)
{
    q->Stack[q->tail_next] = k;
    setBit(q->q_bitmap_next, k);
    q->tail_next++;
}
// Thread-safe delayed push: atomically reserve a slot in the next window
// and mark k in the next-window bitmap.
// Fix: the original marked q->q_bitmap (the *current* window) here, while
// the non-atomic pushArrayStackDelayedWithBitmap marks q->q_bitmap_next and
// slideWindowArrayStackBitmap swaps/clears q_bitmap_next — marking the
// current bitmap leaked stale bits across window slides.
void pushArrayStackDelayedWithBitmapAtomic (struct ArrayStack *q, uint32_t k)
{
    uint32_t local_q_tail_next = __sync_fetch_and_add(&q->tail_next, 1);
    setBitAtomic(q->q_bitmap_next, k);
    q->Stack[local_q_tail_next] = k;
}
// Advance the window: the old current window is consumed (head = tail) and
// the delayed pushes become the new current window (tail = tail_next).
void slideWindowArrayStack (struct ArrayStack *q)
{
    q->head = q->tail;
    q->tail = q->tail_next;
}
// Same as slideWindowArrayStack, but also promotes the next-window bitmap to
// current and clears the (now reusable) next-window bitmap.
void slideWindowArrayStackBitmap (struct ArrayStack *q)
{
    q->head = q->tail;
    q->tail = q->tail_next;
    swapBitmaps(&q->q_bitmap, &q->q_bitmap_next);
    clearBitmap(q->q_bitmap_next);
}
// LIFO pop from the current window's tail; the popped element's bit is
// cleared from the current-window bitmap. Caller must ensure non-emptiness.
uint32_t popArrayStack(struct ArrayStack *q)
{
    q->tail = q->tail - 1;
    uint32_t popped = q->Stack[q->tail];
    clearBit(q->q_bitmap, popped);
    return popped;
}
// Peek at the oldest element of the current window without removing it.
uint32_t frontArrayStack (struct ArrayStack *q)
{
    return q->Stack[q->head];
}
// 1 when the current window [head, tail) holds no elements, 0 otherwise.
uint8_t isEmptyArrayStackCurr (struct ArrayStack *q)
{
    return (uint8_t)(q->tail <= q->head);
}
// 1 only when both the current and the next window are empty, 0 otherwise.
uint8_t isEmptyArrayStack (struct ArrayStack *q)
{
    if(isEmptyArrayStackCurr(q) && isEmptyArrayStackNext(q))
        return 1;
    return 0;
}
// 1 when no element lies between head and tail_next, 0 otherwise.
// (Note the comparison is against head, not tail, mirroring the original.)
uint8_t isEmptyArrayStackNext (struct ArrayStack *q)
{
    return (uint8_t)(q->tail_next <= q->head);
}
// Non-zero when k is marked in the current-window bitmap (i.e. was pushed
// with a *WithBitmap variant and not yet popped/slid away).
uint8_t ispushArrayStack (struct ArrayStack *q, uint32_t k)
{
    return getBit(q->q_bitmap, k);
}
// Non-zero when k is marked in the next-window (delayed) bitmap.
uint8_t ispushArrayStackNext (struct ArrayStack *q, uint32_t k)
{
    return getBit(q->q_bitmap_next, k);
}
// Number of elements in the current window [head, tail).
uint32_t sizeArrayStackCurr(struct ArrayStack *q)
{
    return q->tail - q->head;
}
// Number of delayed elements in the next window [tail, tail_next).
uint32_t sizeArrayStackNext(struct ArrayStack *q)
{
    return q->tail_next - q->tail;
}
// Total elements across both windows, [head, tail_next).
uint32_t sizeArrayStack(struct ArrayStack *q)
{
    return q->tail_next - q->head;
}
// Append a thread-local stack's current window into a shared stack using a
// single atomic reservation, then reset the local stack for reuse.
// NOTE(review): the reservation size is local_q->tail but the copied count
// is tail - head — these only agree when head == 0, and delayed pushes
// (tail_next > tail) are silently dropped; confirm callers guarantee both.
void flushArrayStackToShared(struct ArrayStack *local_q, struct ArrayStack *shared_q)
{
    // Atomically claim a contiguous range of slots in the shared stack.
    uint32_t shared_q_tail_next = __sync_fetch_and_add(&shared_q->tail_next, local_q->tail);
    uint32_t local_q_size = local_q->tail - local_q->head;
    memcpy(&shared_q->Stack[shared_q_tail_next], &local_q->Stack[local_q->head], local_q_size * (sizeof(uint32_t)));
    local_q->head = 0;
    local_q->tail = 0;
    local_q->tail_next = 0;
}
// Set a bit in the current-window bitmap for every vertex in [head, tail).
// Fix: `v` was declared at function scope and therefore *shared* across all
// threads of the `parallel for` (only the loop variable is implicitly
// private) — a data race. Declaring it inside the loop makes it private,
// matching the explicit private(v,i) used by arrayStackToBitmap below.
void arrayStackGenerateBitmap(struct ArrayStack *q)
{
    uint32_t i;
    #pragma omp parallel for
    for(i = q->head ; i < q->tail; i++)
    {
        uint32_t v = q->Stack[i];
        setBitAtomic(q->q_bitmap, v);
    }
}
// Mirror the current window's vertices into an external bitmap `b` in
// parallel, then consume the window (head/tail_next collapse onto tail).
void arrayStackToBitmap(struct ArrayStack *q, struct Bitmap *b)
{
    uint32_t v;
    uint32_t i;

    // v and i are explicitly private so each thread reads its own vertex.
    #pragma omp parallel for default(none) shared(q,b) private(v,i)
    for(i = q->head ; i < q->tail; i++)
    {
        v = q->Stack[i];
        setBitAtomic(b, v);
    }

    // b->numSetBits = q->q_bitmap->numSetBits;
    q->head = q->tail;
    q->tail_next = q->tail;
}
// Convert a bitmap frontier back to an array stack: each thread collects the
// set bits of its chunk into its own local stack, then flushes them into the
// shared stack with one atomic reservation, and finally the window slides so
// the gathered vertices become the current frontier.
void bitmapToArrayStack(struct Bitmap *b, struct ArrayStack *q, struct ArrayStack **localFrontierStacks)
{
    #pragma omp parallel default(none) shared(b,localFrontierStacks,q)
    {
        uint32_t i;
        uint32_t t_id = omp_get_thread_num();
        struct ArrayStack *localFrontierStack = localFrontierStacks[t_id];

        #pragma omp for
        for(i = 0 ; i < (b->size); i++)
        {
            if(getBit(b, i))
            {
                // Local push without bitmap/atomics: the stack is thread-private.
                localFrontierStack->Stack[localFrontierStack->tail] = i;
                localFrontierStack->tail++;
            }
        }

        flushArrayStackToShared(localFrontierStack, q);
    }
    slideWindowArrayStack(q);
}
|
compute_beam_model.c | /*
This file is part of the MCsquare software
Copyright © 2016-2017 Université catholique de Louvain (UCL)
All rights reserved.
The MCsquare software has been developed by Kevin Souris from UCL in the context of a collaboration with IBA s.a.
Each use of this software must be attributed to Université catholique de Louvain (UCL, Louvain-la-Neuve). Any other additional authorizations may be asked to LTTO@uclouvain.be.
The MCsquare software is released under the terms of the open-source Apache 2.0 license. Anyone can use or modify the code provided that the Apache 2.0 license conditions are met. See the Apache 2.0 license for more details https://www.apache.org/licenses/LICENSE-2.0
The MCsquare software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#include "include/compute_beam_model.h"
double ConvertMuToProtons(double weight, double energy)
{
/* http://thread.gmane.org/gmane.comp.science.opengate.user/1564/focus=1572
A MU is defined as a number of nC collected in the Ionization Chamber (IC), which is filled with air.
SP corresponds to proton stopping power in air and it is based on a fit from ICRU data.
K is a constant which depends on the mean energy loss (W) to create an electron/hole pair.
PTP are the temperature and pression corrections.
Using all these parameters correctly allows for MU to absolute number of protons conversion.
*/
// Constant which depends on the mean energy loss (W) to create an electron/hole pair
double K = 35.87; // in eV (other value 34.23 ?)
// Air stopping power (fit ICRU) multiplied by air density
double SP = (9.6139e-9*pow(energy,4) - 7.0508e-6*pow(energy,3) + 2.0028e-3*pow(energy,2) - 2.7615e-1*energy + 2.0082e1) * 1.20479E-3 * 1E6; // in eV / cm
// Temp & Pressure correction
double PTP = 1.0;
// MU calibration (1 MU = 3 nC/cm)
// 1cm de gap effectif
double C = 3.0E-9; // in C / cm
// Gain: 1eV = 1.602176E-19 J
double Gain = (C*K) / (SP*PTP*1.602176E-19);
return weight*Gain;
/*
// Loic's formula (not correct ?)
double K=37.60933;
double SP=9.6139E-09*pow(energy,4)-7.0508E-06*pow(energy,3)+2.0028E-03*pow(energy,2)-2.7615E-01*pow(energy,1)+2.0082E+01*pow(energy,0);
double PTP=1;
double Gain=3./(K*SP*PTP*1.602176E-10);
return (weight*Gain);
*/
}
void deviates (double U[2][2], double sigmas[2], double R[2], VSLStreamStatePtr RNG_Stream)
{
    // Returns vector of gaussian randoms based on sigmas, rotated by U,
    // with means of 0.
    // Implementation: Marsaglia's polar method — draw (v1,v2) uniformly in
    // the unit disk, turn them into two independent N(0,1) deviates, scale
    // by sigmas, then rotate by U into R.
    // The MKL-based alternative below DOES NOT WORK (probably because the
    // generated gaussian numbers would need to lie between -1 and 1?)
    // (or because a 2D gaussian distribution != two 1D gaussian distributions?)
    /*
    VAR_COMPUTE rnd[2];
    #if VAR_COMPUTE_PRECISION==1 // Methods :
    vsRngGaussian( VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, RNG_Stream, 2, rnd, 0.0, 1.0 ); // VSL_RNG_METHOD_GAUSSIAN_BOXMULLER
    #else // VSL_RNG_METHOD_GAUSSIAN_BOXMULLER2
    vdRngGaussian( VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, RNG_Stream, 2, rnd, 0.0, 1.0); // VSL_RNG_METHOD_GAUSSIAN_ICDF
    #endif
    R[0] = U[0][0]*rnd[0] + U[0][1]*rnd[1];
    R[1] = U[1][0]*rnd[0] + U[1][1]*rnd[1];
    */
    int n = 2;
    double V[2]; // The vector to be returned is R now
    double r,v1,v2,fac;
    int i = 1;
    while ( i <= n ) {
        // Rejection-sample a point inside the unit disk.
        // NOTE(review): r == 0 would give log(0) below; vanishingly unlikely
        // with a continuous RNG but not guarded.
        do {
            v1 = 2.0 * single_rand_uniform(RNG_Stream) - 1.0;
            v2 = 2.0 * single_rand_uniform(RNG_Stream) - 1.0;
            r = v1*v1 + v2*v2;
        } while ( r > 1.0 );
        fac = sqrt(-2.0*log(r)/r);
        V[i-1] = v1*fac;
        i++;
        if ( i <= n ) {
            V[i-1] = v2*fac;
            i++;
        }
    }
    // Scale by the per-axis standard deviations.
    for ( i = 0; i < n; i++ ) {
        V[i] *= sigmas[i];
    }
    // Rotate into the requested frame.
    R[0]=U[0][0]*V[0]+U[0][1]*V[1];
    R[1]=U[1][0]*V[0]+U[1][1]*V[1];
}
void diagonalize (double A[2][2],double B[2], double T[2][2])
{
double b=-A[0][0]-A[1][1];
double c=A[1][1]*A[0][0]-A[0][1]*A[1][0];
B[0]=0.5*(-b+sqrt(b*b-4*c));
B[1]=0.5*(-b-sqrt(b*b-4*c));
double r_1=sqrt(1+(A[1][0]/(A[1][1]-B[0]))*(A[1][0]/(A[1][1]-B[0])));
double r_2=sqrt(1+(A[1][0]/(A[1][1]-B[1]))*(A[1][0]/(A[1][1]-B[1])));
T[0][0]=1/r_1;
T[0][1]=1/r_2;
T[1][0]=1/r_1*(-A[1][0]/(A[1][1]-B[0]));
T[1][1]=1/r_2*(-A[1][0]/(A[1][1]-B[1]));
// printf("%f %f %f %f %f %f\n",r_1, r_2,T[1][1],T[1][0],B[0],B[1]);
}
double compute_mEnergy (machine_parameters *mac, double energy)
{
    // Convert the nominal (plan) energy to the Monte Carlo mean energy.
    // What matters is the proton range: the energy configured on the machine
    // to reach a given range is not necessarily the Monte Carlo energy, so
    // the plan energy is converted with a polynomial fit whose parameters
    // must be recomputed for each machine and each MC model.
    int i=0;
    double val=0;
    // Evaluate sum_i poly[i] * energy^(order - i); constant term added last.
    for (i=0;i<mac->mEnergy_order;i++)
    {
        val+=mac->mEnergy_poly[i]*pow(energy,mac->mEnergy_order-i);
    }
    val+=mac->mEnergy_poly[mac->mEnergy_order];
    return val;
}
// Polynomial fit for the energy spread (sigma) at the given nominal energy.
double compute_sEnergy (machine_parameters *mac, double energy)
{
    int i=0;
    double val=0;
    for (i=0;i<mac->sEnergy_order;i++)
    {
        val+=mac->sEnergy_poly[i]*pow(energy,mac->sEnergy_order-i);
    }
    val+=mac->sEnergy_poly[mac->sEnergy_order];
    return val;
}
// Polynomial fit for the beam parameter along X (used as the X spot sigma
// in Sample_particle) at the given nominal energy.
double compute_mX (machine_parameters *mac, double energy)
{
    int i=0;
    double val=0;
    for (i=0;i<mac->mX_order;i++)
    {
        val+=mac->mX_poly[i]*pow(energy,mac->mX_order-i);
    }
    val+=mac->mX_poly[mac->mX_order];
    return val;
}
// Polynomial fit for the beam parameter along Y (used as the Y spot sigma
// in Sample_particle) at the given nominal energy.
double compute_mY (machine_parameters *mac, double energy)
{
    double acc = 0;
    int k;
    // Evaluate sum_k poly[k] * energy^(order - k); constant term added last.
    for (k = 0; k < mac->mY_order; k++)
    {
        acc += mac->mY_poly[k] * pow(energy, mac->mY_order - k);
    }
    acc += mac->mY_poly[mac->mY_order];
    return acc;
}
// Polynomial fit for the angular divergence along X (theta) at the given
// nominal energy.
double compute_mTheta (machine_parameters *mac, double energy)
{
    int i=0;
    double val=0;
    for (i=0;i<mac->mTheta_order;i++)
    {
        val+=mac->mTheta_poly[i]*pow(energy,mac->mTheta_order-i);
    }
    val+=mac->mTheta_poly[mac->mTheta_order];
    return val;
}
// Polynomial fit for the angular divergence along Y (phi) at the given
// nominal energy.
double compute_mPhi (machine_parameters *mac, double energy)
{
    int i=0;
    double val=0;
    for (i=0;i<mac->mPhi_order;i++)
    {
        val+=mac->mPhi_poly[i]*pow(energy,mac->mPhi_order-i);
    }
    val+=mac->mPhi_poly[mac->mPhi_order];
    return val;
}
// X/theta emittance, derived from the spot size and divergence fits
// (pi/2 * sigma_x * sigma_theta) rather than its own polynomial.
double compute_eXTheta (machine_parameters *mac, double energy)
{
    // The direct polynomial fit below is retained but disabled.
    /*
    int i=0;
    double val=0;
    for (i=0;i<mac->eXTheta_order;i++)
    {
        val+=mac->eXTheta_poly[i]*pow(energy,mac->eXTheta_order-i);
    }
    val+=mac->eXTheta_poly[mac->eXTheta_order];
    return val;
    */
    return 0.5*M_PI * compute_mX(mac, energy) * compute_mTheta(mac, energy);
}
// Y/phi emittance, derived from the spot size and divergence fits
// (pi/2 * sigma_y * sigma_phi) rather than its own polynomial.
double compute_eYPhi (machine_parameters *mac, double energy)
{
    // The direct polynomial fit below is retained but disabled.
    /*
    int i=0;
    double val=0;
    for (i=0;i<mac->eYPhi_order;i++)
    {
        val+=mac->eYPhi_poly[i]*pow(energy,mac->eYPhi_order-i);
    }
    val+=mac->eYPhi_poly[mac->eYPhi_order];
    return val;
    */
    return 0.5*M_PI * compute_mY(mac, energy) * compute_mPhi(mac, energy);
}
void rotateX (double angle, double vector[3])
{
double cx=cos(angle);
double sx=sin(angle);
double tempX=vector [0];
double tempY=vector [1];
double tempZ=vector [2];
vector[0]= tempX;
vector[1]=tempY*cx - tempZ*sx;
vector[2]=tempY*sx + tempZ*cx;
/*
double norm=sqrt(vector[0]*vector[0] + vector[1]*vector[1] + vector[2]*vector[2]);
vector[0]/=norm;
vector[1]/=norm;
vector[2]/=norm;
*/
}
void rotateY (double angle, double vector[3])
{
double cx=cos(angle);
double sx=sin(angle);
double tempX=vector [0];
double tempY=vector [1];
double tempZ=vector [2];
vector[0]= tempX*cx + tempZ*sx;
vector[1]= tempY;
vector[2]=-tempX*sx + tempZ*cx;
/*
double norm=sqrt(vector[0]*vector[0] + vector[1]*vector[1] + vector[2]*vector[2]);
vector[0]/=norm;
vector[1]/=norm;
vector[2]/=norm;
*/
}
void rotateZ (double angle, double vector[3])
{
double cx=cos(angle);
double sx=sin(angle);
double tempX=vector [0];
double tempY=vector [1];
double tempZ=vector [2];
vector[0]= tempX*cx - tempY*sx;
vector[1]= tempX*sx + tempY*cx;
vector[2]= tempZ;
/*
double norm=sqrt(vector[0]*vector[0] + vector[1]*vector[1] + vector[2]*vector[2]);
vector[0]/=norm;
vector[1]/=norm;
vector[2]/=norm;
*/
}
// Sample one proton from the beam model for the given control point / spot:
// energy, transverse position, and direction are drawn from correlated
// gaussians, then expressed in the beam-eye-view (BEV) frame and stored in
// `hadron` (positions converted mm -> cm, energy MeV -> eV).
// NOTE(review): CT_Length is unused in this function — presumably kept for
// signature symmetry with Transport_to_CT; confirm.
void Sample_particle (Hadron_buffer *hadron, VAR_DATA CT_Length[3], machine_parameters *mac, ControlPoint_parameters *ControlPoint, spot_parameters *spot, VSLStreamStatePtr RNG_Stream)
{
    //here we need to sample particle parameters
    double E;
    double A[2][2];
    double T[2][2];
    double XTheta[2];
    double YPhi[2];
    double sigmas[2];

    if(mac->Beam_Model == UPenn){
        // UPenn model: machine data is tabulated per nominal energy; linearly
        // interpolate between the two bracketing entries, then draw from one
        // of two weighted gaussian components per transverse plane.
        int EnergyID = Sequential_Search(ControlPoint->Energy, mac->Nominal_Energies, mac->Number_Energies);
        if(EnergyID < 0) EnergyID = 0;
        if(EnergyID > (mac->Number_Energies - 2)) EnergyID = mac->Number_Energies - 2;
        VAR_COMPUTE Energy1 = mac->Nominal_Energies[EnergyID];
        VAR_COMPUTE Energy2 = mac->Nominal_Energies[EnergyID+1];
        E = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Mean_Energies[EnergyID], mac->Mean_Energies[EnergyID+1]);
        VAR_COMPUTE sE = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Energy_Spread[EnergyID], mac->Energy_Spread[EnergyID+1]);
        // Energy spread is tabulated as a percentage of the nominal energy.
        E = single_rand_normal(RNG_Stream, E, sE*ControlPoint->Energy/100);

        VAR_COMPUTE SpotSizeX, DivergenceX, CorrelationX, SpotSizeY, DivergenceY, CorrelationY;
        VAR_COMPUTE weight1 = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Weight1[EnergyID], mac->Weight1[EnergyID+1]);
        VAR_COMPUTE weight2 = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Weight2[EnergyID], mac->Weight2[EnergyID+1]);
        // Pick gaussian component 1 or 2 proportionally to its weight.
        VAR_COMPUTE rnd = single_rand_uniform(RNG_Stream);
        rnd = rnd * (weight1 + weight2);
        if(rnd < weight1){
            SpotSizeX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->SpotSize1x[EnergyID], mac->SpotSize1x[EnergyID+1]);
            DivergenceX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Divergence1x[EnergyID], mac->Divergence1x[EnergyID+1]);
            CorrelationX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Correlation1x[EnergyID], mac->Correlation1x[EnergyID+1]);
            SpotSizeY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->SpotSize1y[EnergyID], mac->SpotSize1y[EnergyID+1]);
            DivergenceY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Divergence1y[EnergyID], mac->Divergence1y[EnergyID+1]);
            CorrelationY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Correlation1y[EnergyID], mac->Correlation1y[EnergyID+1]);
        }
        else{
            SpotSizeX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->SpotSize2x[EnergyID], mac->SpotSize2x[EnergyID+1]);
            DivergenceX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Divergence2x[EnergyID], mac->Divergence2x[EnergyID+1]);
            CorrelationX = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Correlation2x[EnergyID], mac->Correlation2x[EnergyID+1]);
            SpotSizeY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->SpotSize2y[EnergyID], mac->SpotSize2y[EnergyID+1]);
            DivergenceY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Divergence2y[EnergyID], mac->Divergence2y[EnergyID+1]);
            CorrelationY = Linear_Interpolation(ControlPoint->Energy, Energy1, Energy2, mac->Correlation2y[EnergyID], mac->Correlation2y[EnergyID+1]);
        }

        // Sample X direction
        // Build the position/divergence covariance matrix, diagonalize it,
        // and draw correlated (x, theta) deviates in its eigenframe.
        A[0][0] = SpotSizeX * SpotSizeX;
        A[1][1] = DivergenceX * DivergenceX;
        A[0][1] = CorrelationX * SpotSizeX * DivergenceX;
        A[1][0] = A[0][1];
        diagonalize(A,sigmas,T);
        sigmas[0]=sqrt(sigmas[0]);
        sigmas[1]=sqrt(sigmas[1]);
        deviates(T,sigmas,XTheta, RNG_Stream);

        // Sample Y direction
        A[0][0] = SpotSizeY * SpotSizeY;
        A[1][1] = DivergenceY * DivergenceY;
        A[0][1] = CorrelationY * SpotSizeY * DivergenceY;
        A[1][0] = A[0][1];
        diagonalize(A,sigmas,T);
        sigmas[0]=sqrt(sigmas[0]);
        sigmas[1]=sqrt(sigmas[1]);
        deviates(T,sigmas,YPhi, RNG_Stream);
    }
    else{
        // Fit-based model: beam parameters come from the compute_* polynomial
        // fits; below 70 MeV the fits are evaluated at 70 MeV (clamped).
        E = compute_mEnergy(mac,ControlPoint->Energy);
        double E_corr = ControlPoint->Energy;
        if(ControlPoint->Energy < 70.0){
            E_corr = 70.0;
        }
        double sE = compute_sEnergy (mac,E_corr);
        double sX=compute_mX(mac,E_corr);
        double sY=compute_mY(mac,E_corr);
        double sTheta=compute_mTheta(mac,E_corr);
        double sPhi=compute_mPhi(mac,E_corr);
        double epsilonXTheta=compute_eXTheta(mac,E_corr);
        double epsilonYPhi=compute_eYPhi(mac,E_corr);

        // sampling of beam characteristics
        E = single_rand_normal(RNG_Stream, E, sE*ControlPoint->Energy/100);

        // Sample X direction
        // Covariance built from emittance (Twiss-style alpha/beta/gamma).
        epsilonXTheta/=M_PI;
        double beta=sX*sX/epsilonXTheta;
        double gamma=sTheta*sTheta/epsilonXTheta;
        double alpha=-sqrt(beta*gamma-1.);
        A[0][0]=sX*sX;
        A[1][1]=sTheta*sTheta;
        A[0][1]=-alpha*epsilonXTheta;
        A[1][0]=A[0][1];
        diagonalize(A,sigmas,T);
        sigmas[0]=sqrt(sigmas[0]);
        sigmas[1]=sqrt(sigmas[1]);
        deviates(T,sigmas,XTheta, RNG_Stream);

        // Sample Y direction
        epsilonYPhi/=M_PI;
        beta=sY*sY/epsilonYPhi;
        gamma=sPhi*sPhi/epsilonYPhi;
        alpha=-sqrt(beta*gamma-1.);
        A[0][0]=sY*sY;
        A[1][1]=sPhi*sPhi;
        A[0][1]=-alpha*epsilonYPhi;
        A[1][0]=A[0][1];
        diagonalize(A,sigmas,T);
        sigmas[0]=sqrt(sigmas[0]);
        sigmas[1]=sqrt(sigmas[1]);
        deviates(T,sigmas,YPhi, RNG_Stream);
    }

    // Spot eye view to Beam eye view:
    //---- at this point, we have all information of particle characteristics
    //---- we perform all preliminary calculations according to BEV reference frame.
    // Scanning-magnet deflection angles derived from the spot position and
    // the magnet-to-isocenter distances.
    double rotation[3];
    rotation[0]= M_PI+atan(spot->Spot_Y/mac->mDistanceSMYToIsocenter);
    rotation[1]= -atan(spot->Spot_X/mac->mDistanceSMXToIsocenter);
    rotation[2]=0.0;

    double particle_position[3];
    particle_position[0]=XTheta[0];
    particle_position[1]=YPhi[0];
    particle_position[2]=0.0;
    //printf("\nSample: init particle_position: [%.3f ; %.3f ; %.3f]", particle_position[0], particle_position[1], particle_position[2]);
    rotateX(rotation[0],particle_position);
    rotateY(rotation[1],particle_position);
    rotateZ(rotation[2],particle_position);
    // Offset by the spot position projected to the source plane.
    particle_position[0] += spot->Spot_X*(mac->mDistanceSMXToIsocenter - mac->mDistanceSourcePatient)/mac->mDistanceSMXToIsocenter;
    particle_position[1] += spot->Spot_Y*(mac->mDistanceSMYToIsocenter - mac->mDistanceSourcePatient)/mac->mDistanceSMYToIsocenter;
    particle_position[2] += mac->mDistanceSourcePatient;
    //printf("\nSample: add source distance: [%.3f ; %.3f ; %.3f]", particle_position[0], particle_position[1], particle_position[2]);

    // Direction from the sampled angular deviates, normalized then rotated.
    double particle_direction[3];
    particle_direction[0]=tan(XTheta[1]);
    particle_direction[1]=tan(YPhi[1]);
    particle_direction[2]=1.0;
    double norm=sqrt(particle_direction[0]*particle_direction[0] + particle_direction[1]*particle_direction[1] + particle_direction[2]*particle_direction[2]);
    particle_direction[0]/=norm;
    particle_direction[1]/=norm;
    particle_direction[2]/=norm;
    rotateX(rotation[0],particle_direction);
    rotateY(rotation[1],particle_direction);
    rotateZ(rotation[2],particle_direction);

    // Store results: positions mm -> cm, energy MeV -> eV.
    hadron->x = particle_position[0] / 10.0;
    hadron->y = particle_position[1] / 10.0;
    hadron->z = particle_position[2] / 10.0;
    //printf("\nSample: convert to cm: [%.3f ; %.3f ; %.3f]", hadron->x, hadron->y, hadron->z);
    hadron->u = particle_direction[0];
    hadron->v = particle_direction[1];
    hadron->w = particle_direction[2];
    hadron->T = E * 1e6; // energy in eV
    hadron->M = 1.0;
    hadron->charge = 1.0;
    hadron->mass = 1.0;
    hadron->type = Proton;
}
// Transform a particle from the beam-eye-view (BEV) frame into the CT frame
// used by MCsquare: apply gantry and patient-support rotations, translate by
// the isocenter (mm -> cm), and swap the Y/Z axes (beam model is XYZ in mm,
// MCsquare is XZY in cm).
void BEV_to_CT_frame(Hadron_buffer *hadron, machine_parameters *mac, field_parameters *field){
    //---- we need now to express all particles properties to values consistent with MCsquare
    //---- after that, we express all variables in CT reference frame, which is used in MCsquare

    double particle_position[3];
    particle_position[0] = hadron->x;
    particle_position[1] = hadron->y;
    particle_position[2] = hadron->z;
    rotateY(field->GantryAngle, particle_position);
    rotateZ(-field->PatientSupportAngle, particle_position);
    // Beam model: XYZ (mm)
    // MCsquare: XZY (cm)
    hadron->x = particle_position[0] + (field->IsocenterPositionX / 10.0);
    hadron->y = particle_position[2] + (field->IsocenterPositionY / 10.0);
    hadron->z = particle_position[1] + (field->IsocenterPositionZ / 10.0);

    // Directions get the same rotations and axis swap, but no translation.
    double particle_direction[3];
    particle_direction[0] = hadron->u;
    particle_direction[1] = hadron->v;
    particle_direction[2] = hadron->w;
    rotateY(field->GantryAngle, particle_direction);
    rotateZ(-field->PatientSupportAngle, particle_direction);
    // Beam model: XYZ (mm)
    // MCsquare: XZY (cm)
    hadron->u = particle_direction[0];
    hadron->v = particle_direction[2];
    hadron->w = particle_direction[1];
}
// Translate a particle generated outside the CT volume onto the CT surface
// along its direction, subtracting the energy lost in air over the traveled
// distance. Particles that never intersect the volume, or whose residual
// energy drops below the proton cut-off, are discarded (type = Unknown).
void Transport_to_CT(Hadron_buffer *hadron, VAR_DATA CT_Length[3], DATA_config *config){
    // Already inside the CT volume: nothing to do.
    if(hadron->x >= 0 && hadron->y >= 0 && hadron->z >= 0 && hadron->x <= CT_Length[0] && hadron->y <= CT_Length[1] && hadron->z <= CT_Length[2]) return;

    // Candidate path lengths to each axis-aligned boundary plane.
    double Translation[3], new_position[3];
    if(hadron->u > 0) Translation[0] = (0 - hadron->x) / hadron->u;
    else Translation[0] = (CT_Length[0] - hadron->x) / hadron->u;
    if(hadron->v > 0) Translation[1] = (0 - hadron->y) / hadron->v;
    else Translation[1] = (CT_Length[1] - hadron->y) / hadron->v;
    if(hadron->w > 0) Translation[2] = (0 - hadron->z) / hadron->w;
    else Translation[2] = (CT_Length[2] - hadron->z) / hadron->w;

    int i;
    int selected = -1;
    for(i=0; i<3; i++){
        if(Translation[i] < 0) continue;
        Translation[i] += 1e-4; // nudge slightly inside the volume
        new_position[0] = hadron->x + Translation[i] * hadron->u;
        new_position[1] = hadron->y + Translation[i] * hadron->v;
        new_position[2] = hadron->z + Translation[i] * hadron->w;
        if(new_position[0] > 0.0 && new_position[1] > 0.0 && new_position[2] > 0.0 && new_position[0] < CT_Length[0] && new_position[1] < CT_Length[1] && new_position[2] < CT_Length[2] && !isnan(new_position[0]) && !isnan(new_position[1]) && !isnan(new_position[2])){
            selected = i;
            break;
        }
    }

    // Bug fix: the original fell through with i == 3 when no candidate was
    // valid, then read Translation[3] (out of bounds) and a possibly
    // uninitialized new_position. Such particles cannot reach the CT volume:
    // discard them instead.
    if(selected < 0){
        hadron->type = Unknown;
        return;
    }

    hadron->x = new_position[0];
    hadron->y = new_position[1];
    hadron->z = new_position[2];

    // Energy loss in air over the traveled distance (ICRU stopping-power
    // fit, eV/cm — same polynomial as ConvertMuToProtons).
    double energy = hadron->T / (UMeV*hadron->mass);
    double SP_air = hadron->charge * (9.6139e-9*pow(energy,4) - 7.0508e-6*pow(energy,3) + 2.0028e-3*pow(energy,2) - 2.7615e-1*energy + 2.0082e1) * 1.20479E-3 * 1E6;
    double dE = SP_air * Translation[selected];
    hadron->T = hadron->T - dE;
    if(hadron->T < (config->Ecut_Pro * UMeV)) hadron->type = Unknown;
}
// Project each particle backwards/forwards along its direction onto the
// upstream face of its layer's range shifter (isocenter distance +
// thickness). Particles whose layer has no range shifter are skipped.
void Transport_to_RangeShifter(Hadron_buffer *hadron, ControlPoint_parameters **layer_data, int Nbr_hadrons){
    int i;
    double IsocenterDistance;

    for(i=0; i<Nbr_hadrons; i++){
        if(layer_data[i]->RS_setting == OUT || layer_data[i]->RS_Thickness <= 0.0) continue;

        IsocenterDistance = layer_data[i]->RS_IsocenterDist + layer_data[i]->RS_Thickness;
        //printf("\nTransportRS: IsoDist: [%.3f]", IsocenterDistance);
        //printf("\nTransportRS: init position: [%.3f ; %.3f ; %.3f]", hadron[i].x, hadron[i].y, hadron[i].z);
        //printf("\nTransportRS: direction: [%.3f ; %.3f ; %.3f]", hadron[i].u, hadron[i].v, hadron[i].w);
        // Move transversally in proportion to the longitudinal distance,
        // then place the particle exactly on the range shifter plane.
        hadron[i].x += hadron[i].u * (hadron[i].z - IsocenterDistance) / fabs(hadron[i].w);
        hadron[i].y += hadron[i].v * (hadron[i].z - IsocenterDistance) / fabs(hadron[i].w);
        hadron[i].z = IsocenterDistance;
        //printf("\nTransportRS: post: [%.3f ; %.3f ; %.3f]", hadron[i].x, hadron[i].y, hadron[i].z);
    }
}
// Sample VLENGTH new primary particles from the PBS plan, optionally pass
// them through the range shifter, rotate them from the beam's-eye-view (BEV)
// frame to the CT frame, apply setup uncertainties, and append the particles
// that land inside the CT volume to the output buffer.
//   hadron       : output particle buffer (appended to)
//   Nbr_hadrons  : in/out count of particles stored in `hadron`
//   CT_Length    : CT volume extents, used to reject out-of-volume particles
//   plan/machine : treatment-plan and beam-model parameters
//   RNG_Stream   : random stream used for all sampling
//   config       : global configuration and counters
//   material     : material tables used by the range-shifter simulation
void Generate_PBS_particle(Hadron_buffer *hadron, int *Nbr_hadrons, VAR_DATA CT_Length[3], plan_parameters *plan, machine_parameters *machine, VSLStreamStatePtr RNG_Stream, DATA_config *config, Materials *material){
ALIGNED_(64) VAR_COMPUTE v_rnd[VLENGTH];
rand_uniform(RNG_Stream, v_rnd);
// Scale uniform deviates to the plan's total weight so they can be looked up
// in the cumulative PDFs below.
v_rnd[vALL] = v_rnd[vALL] * plan->cumulative_weight;
ALIGNED_(64) int v_field_index[VLENGTH];
ALIGNED_(64) int v_ControlPoint_index[VLENGTH];
ALIGNED_(64) int v_spot_index[VLENGTH];
// Fixed capacity 50: must exceed VLENGTH plus any secondaries added by
// Simulate_RangeShifter — TODO confirm against that function's limits.
Hadron_buffer New_hadrons[50];
field_parameters *New_hadrons_field[50];
ControlPoint_parameters *New_hadrons_layer[50];
int Nbr_New_hadrons = VLENGTH;
int use_RS = 0;
// Generate new particles according to PBS plan
int i;
for(i=0; i<Nbr_New_hadrons; i++){
// Invert the cumulative PDFs: field, then control point (energy layer),
// then spot. Note the +1 offset applied to each search result.
v_field_index[i] = Sequential_Search(v_rnd[i], plan->Fields_cumulative_PDF, plan->NumberOfFields) + 1;
v_ControlPoint_index[i] = Binary_Search(v_rnd[i], plan->fields[v_field_index[i]].ControlPoints_cumulative_PDF, plan->fields[v_field_index[i]].NumberOfControlPoints) + 1;
v_spot_index[i] = Binary_Search(v_rnd[i], plan->fields[v_field_index[i]].ControlPoints[v_ControlPoint_index[i]].Spots_cumulative_PDF, plan->fields[v_field_index[i]].ControlPoints[v_ControlPoint_index[i]].NbOfScannedSpots) + 1;
Sample_particle (&New_hadrons[i], CT_Length, machine, &plan->fields[v_field_index[i]].ControlPoints[v_ControlPoint_index[i]], &plan->fields[v_field_index[i]].ControlPoints[v_ControlPoint_index[i]].spots[v_spot_index[i]], RNG_Stream);
New_hadrons_field[i] = &plan->fields[v_field_index[i]];
New_hadrons_layer[i] = &plan->fields[v_field_index[i]].ControlPoints[v_ControlPoint_index[i]];
// Range shifter is simulated only if at least one sampled layer uses it.
if(New_hadrons_layer[i]->RS_setting == IN && New_hadrons_layer[i]->RS_WET > 0) use_RS = 1;
}
// Range shifter simulation
if(use_RS == 1){
Transport_to_RangeShifter(New_hadrons, New_hadrons_layer, Nbr_New_hadrons);
// May change Nbr_New_hadrons (e.g. secondaries) — it is passed by pointer.
Simulate_RangeShifter(New_hadrons, New_hadrons_layer, New_hadrons_field, &Nbr_New_hadrons, config, machine, material, RNG_Stream);
}
// Convert BEV to CT reference frame, simulate setup uncertainties, and translate all particles to CT
double position[3];
for(i=0; i<Nbr_New_hadrons; i++){
// Particles flagged Unknown (e.g. stopped in the range shifter) are dropped.
if(New_hadrons[i].type == Unknown) continue;
BEV_to_CT_frame(&New_hadrons[i], machine, New_hadrons_field[i]);
Translation_uncertainty(&New_hadrons[i], config, RNG_Stream);
Transport_to_CT(&New_hadrons[i], CT_Length, config);
// Reject particles outside the CT volume (or with NaN coordinates) and
// count them; the counter is shared across OpenMP threads.
if(New_hadrons[i].x < 0 || New_hadrons[i].y < 0 || New_hadrons[i].z < 0 || New_hadrons[i].x > CT_Length[0] || New_hadrons[i].y > CT_Length[1] || New_hadrons[i].z > CT_Length[2] || isnan(New_hadrons[i].x) || isnan(New_hadrons[i].y) || isnan(New_hadrons[i].z)){
#pragma omp atomic
config->Particle_Generated_outside += 1;
}
else if(New_hadrons[i].type != Unknown){
// Particle survived every step: append it to the caller's buffer.
Copy_particle_buffer(&hadron[*Nbr_hadrons], &New_hadrons[i]);
*Nbr_hadrons = *Nbr_hadrons + 1;
}
}
}
// Allocate the skeleton of a single-spot sub-plan: one field, one control
// point, one spot. Plan identification is copied from the mother plan; the
// spot itself is filled in later by Select_spot(). Caller owns the result.
plan_parameters* Init_single_spot_plan(plan_parameters *Plan){
	plan_parameters *Beamlet = (plan_parameters*)malloc(sizeof(plan_parameters));
	// Plan-level header copied from the mother plan.
	strcpy(Beamlet->PlanName, Plan->PlanName);
	Beamlet->NumberOfFractions = Plan->NumberOfFractions;
	Beamlet->FractionID = Plan->FractionID;
	// Exactly one field.
	Beamlet->NumberOfFields = 1;
	Beamlet->FieldsID = (int*)malloc(sizeof(int));
	Beamlet->fields = (field_parameters*)malloc(sizeof(field_parameters));
	Beamlet->Fields_cumulative_PDF = (VAR_DATA*)malloc(sizeof(VAR_DATA));
	// Exactly one control point (energy layer) in that field.
	field_parameters *field = &Beamlet->fields[0];
	field->NumberOfControlPoints = 1;
	field->ControlPoints = (ControlPoint_parameters*)malloc(sizeof(ControlPoint_parameters));
	field->ControlPoints_cumulative_PDF = (VAR_DATA*)malloc(sizeof(VAR_DATA));
	// Exactly one spot in that control point.
	ControlPoint_parameters *layer = &field->ControlPoints[0];
	layer->NbOfScannedSpots = 1;
	layer->spots = (spot_parameters*)malloc(sizeof(spot_parameters));
	layer->Spots_cumulative_PDF = (VAR_DATA*)malloc(sizeof(VAR_DATA));
	return Beamlet;
}
// Copy one spot of the mother plan into the pre-allocated single-spot
// Beamlet (see Init_single_spot_plan). The selected spot's weight becomes
// the Beamlet's total weight, so every cumulative PDF is that one weight.
void Select_spot(plan_parameters *Plan, plan_parameters *Beamlet, int FieldID, int ControlPointID, int SpotID){
// Shorthands for the selected field / layer / spot in the mother plan.
#define SRC_FIELD (Plan->fields[FieldID])
#define SRC_LAYER (Plan->fields[FieldID].ControlPoints[ControlPointID])
#define SRC_SPOT  (Plan->fields[FieldID].ControlPoints[ControlPointID].spots[SpotID])
	// Plan-level weights: the single spot carries the whole plan weight.
	Beamlet->FieldsID[0] = SRC_FIELD.FieldID;
	Beamlet->TotalMetersetWeightOfAllFields = SRC_SPOT.Spot_Weight;
	Beamlet->cumulative_weight = SRC_SPOT.Spot_Weight;
	Beamlet->Fields_cumulative_PDF[0] = SRC_SPOT.Spot_Weight;
	Beamlet->normalization_factor = Plan->normalization_factor;
	// Field-level geometry and identification.
	Beamlet->fields[0].FieldID = SRC_FIELD.FieldID;
	Beamlet->fields[0].FinalCumulativeMeterSetWeight = SRC_SPOT.Spot_Weight;
	Beamlet->fields[0].GantryAngle = SRC_FIELD.GantryAngle;
	Beamlet->fields[0].PatientSupportAngle = SRC_FIELD.PatientSupportAngle;
	Beamlet->fields[0].IsocenterPositionX = SRC_FIELD.IsocenterPositionX;
	Beamlet->fields[0].IsocenterPositionY = SRC_FIELD.IsocenterPositionY;
	Beamlet->fields[0].IsocenterPositionZ = SRC_FIELD.IsocenterPositionZ;
	Beamlet->fields[0].ControlPoints_cumulative_PDF[0] = SRC_SPOT.Spot_Weight;
	Beamlet->fields[0].RS_Type = SRC_FIELD.RS_Type;
	// Control-point (energy layer) settings, including the range shifter.
	Beamlet->fields[0].ControlPoints[0].ControlPointIndex = SRC_LAYER.ControlPointIndex;
	Beamlet->fields[0].ControlPoints[0].SpotTunnedID = SRC_LAYER.SpotTunnedID;
	Beamlet->fields[0].ControlPoints[0].CumulativeMetersetWeight = SRC_SPOT.Spot_Weight;
	Beamlet->fields[0].ControlPoints[0].Energy = SRC_LAYER.Energy;
	Beamlet->fields[0].ControlPoints[0].Spots_cumulative_PDF[0] = SRC_SPOT.Spot_Weight;
	Beamlet->fields[0].ControlPoints[0].RS_setting = SRC_LAYER.RS_setting;
	Beamlet->fields[0].ControlPoints[0].RS_IsocenterDist = SRC_LAYER.RS_IsocenterDist;
	Beamlet->fields[0].ControlPoints[0].RS_WET = SRC_LAYER.RS_WET;
	Beamlet->fields[0].ControlPoints[0].RS_Thickness = SRC_LAYER.RS_Thickness;
	// The spot itself.
	Beamlet->fields[0].ControlPoints[0].spots[0].Spot_X = SRC_SPOT.Spot_X;
	Beamlet->fields[0].ControlPoints[0].spots[0].Spot_Y = SRC_SPOT.Spot_Y;
	Beamlet->fields[0].ControlPoints[0].spots[0].Spot_Weight = SRC_SPOT.Spot_Weight;
	Beamlet->fields[0].ControlPoints[0].spots[0].Spot_Time = SRC_SPOT.Spot_Time;
#undef SRC_FIELD
#undef SRC_LAYER
#undef SRC_SPOT
	return;
}
// Build a stand-alone sub-plan containing a deep copy of a single beam
// (field) of the mother plan. All cumulative PDFs are recomputed from the
// copied spot weights, and the normalization factor is rescaled by this
// beam's share of the mother plan's total weight. Caller owns the result.
plan_parameters* Select_beam(plan_parameters *Plan, int Beam){
	printf("\nGenerating sub-plan for beam %d \n\n", Beam+1);
	plan_parameters *beam_plan = (plan_parameters*)malloc(sizeof(plan_parameters));
	field_parameters *src = &Plan->fields[Beam];
	VAR_DATA weight_sum = 0;
	int cp, s;

	// Plan-level header: a single field; totals are filled in after the copy.
	strcpy(beam_plan->PlanName, "Beam plan");
	beam_plan->NumberOfFractions = Plan->NumberOfFractions;
	beam_plan->FractionID = Plan->FractionID;
	beam_plan->NumberOfFields = 1;
	beam_plan->TotalMetersetWeightOfAllFields = 0.0;
	beam_plan->fields = (field_parameters*)malloc(1 * sizeof(field_parameters));
	beam_plan->Fields_cumulative_PDF = (VAR_DATA*)malloc(1 * sizeof(VAR_DATA));
	beam_plan->FieldsID = (int*)malloc(1 * sizeof(int));
	beam_plan->FieldsID[0] = Plan->FieldsID[Beam];

	// Field-level geometry and range-shifter configuration.
	field_parameters *dst = &beam_plan->fields[0];
	dst->FieldID = src->FieldID;
	dst->FinalCumulativeMeterSetWeight = 0.0;
	dst->GantryAngle = src->GantryAngle;
	dst->PatientSupportAngle = src->PatientSupportAngle;
	dst->IsocenterPositionX = src->IsocenterPositionX;
	dst->IsocenterPositionY = src->IsocenterPositionY;
	dst->IsocenterPositionZ = src->IsocenterPositionZ;
	dst->NumberOfControlPoints = src->NumberOfControlPoints;
	dst->RS_Type = src->RS_Type;
	dst->ControlPoints = (ControlPoint_parameters*)malloc(src->NumberOfControlPoints * sizeof(ControlPoint_parameters));
	dst->ControlPoints_cumulative_PDF = (VAR_DATA*)malloc(src->NumberOfControlPoints * sizeof(VAR_DATA));

	// Deep-copy every control point (energy layer) and its spots, folding the
	// running weight into the cumulative PDFs used for spot sampling.
	for(cp=0; cp<src->NumberOfControlPoints; cp++){
		ControlPoint_parameters *src_cp = &src->ControlPoints[cp];
		ControlPoint_parameters *dst_cp = &dst->ControlPoints[cp];
		dst_cp->ControlPointIndex = src_cp->ControlPointIndex;
		dst_cp->SpotTunnedID = src_cp->SpotTunnedID;
		dst_cp->CumulativeMetersetWeight = 0.0;
		dst_cp->Energy = src_cp->Energy;
		dst_cp->NbOfScannedSpots = src_cp->NbOfScannedSpots;
		dst_cp->RS_setting = src_cp->RS_setting;
		dst_cp->RS_IsocenterDist = src_cp->RS_IsocenterDist;
		dst_cp->RS_WET = src_cp->RS_WET;
		dst_cp->RS_Thickness = src_cp->RS_Thickness;
		dst_cp->spots = (spot_parameters*)malloc(src_cp->NbOfScannedSpots * sizeof(spot_parameters));
		dst_cp->Spots_cumulative_PDF = (VAR_DATA*)malloc(src_cp->NbOfScannedSpots * sizeof(VAR_DATA));
		for(s=0; s<src_cp->NbOfScannedSpots; s++){
			dst_cp->spots[s].Spot_X = src_cp->spots[s].Spot_X;
			dst_cp->spots[s].Spot_Y = src_cp->spots[s].Spot_Y;
			dst_cp->spots[s].Spot_Time = src_cp->spots[s].Spot_Time;
			dst_cp->spots[s].Spot_Weight = src_cp->spots[s].Spot_Weight;
			weight_sum += dst_cp->spots[s].Spot_Weight;
			dst_cp->Spots_cumulative_PDF[s] = weight_sum;
		}
		dst->ControlPoints_cumulative_PDF[cp] = weight_sum;
		dst_cp->CumulativeMetersetWeight = weight_sum;
	}

	// Totals for the single-field sub-plan.
	beam_plan->Fields_cumulative_PDF[0] = weight_sum;
	dst->FinalCumulativeMeterSetWeight = weight_sum;
	beam_plan->TotalMetersetWeightOfAllFields = weight_sum;
	beam_plan->cumulative_weight = weight_sum;
	// Rescale normalization by this beam's fraction of the total plan weight.
	beam_plan->normalization_factor = Plan->normalization_factor * beam_plan->Fields_cumulative_PDF[0] / Plan->Fields_cumulative_PDF[Plan->NumberOfFields-1];
	return beam_plan;
}
|
dynamic.c | #include <stdio.h>
#include <omp.h>
/*
 * OpenMP scheduling demo: each thread prints which iterations of a
 * statically scheduled loop it executes. To experiment, replace the
 * schedule clause with: (static, 1), (static, 5), (dynamic), (dynamic, 1),
 * (dynamic, 3), (guided, 2), (auto), or (runtime).
 */
int main()
{
    /* The loop variable of an omp-for is private by rule; declaring it
       inside the parallel region makes that explicit. */
    #pragma omp parallel
    {
        int i;
        #pragma omp for schedule (static)
        for (i = 0; i < 10; ++i) {
            printf("Thread %d runs i = %i \n", omp_get_thread_num(), i);
        }
    }
    return 0;
}
|
GB_binop__second_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__second_int16
// A.*B function (eWiseMult): GB_AemultB__second_int16
// A*D function (colscale): GB_AxD__second_int16
// D*A function (rowscale): GB_DxB__second_int16
// C+=B function (dense accum): GB_Cdense_accumB__second_int16
// C+=b function (dense accum): GB_Cdense_accumb__second_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_int16
// C=scalar+B (none)
// C=scalar+B' (none)
// C=A+scalar GB_bind2nd__second_int16
// C=A'+scalar GB_bind2nd_tran__second_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = bij
// Template configuration macros: these define the types and the operator
// consumed by the template files #include'd in the kernels below. For the
// SECOND operator z = f(x,y) = y, so A's values are never read (GB_GETA
// expands to an empty statement).
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator: SECOND keeps its second operand
#define GB_BINOP(z, x, y, i, j) \
z = y ;
// op is second
#define GB_OP_IS_SECOND \
1
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SECOND || GxB_NO_INT16 || GxB_NO_SECOND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// With the SECOND operator (GB_BINOP: z = y), this amounts to copying B's
// values into C. The traversal lives in the included template file.
GrB_Info GB_Cdense_ewise3_noaccum__second_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The kfirst/klast/pstart slice arrays partition B's entries across ntasks
// parallel tasks; the included template performs the accumulation.
GrB_Info GB_Cdense_accumB__second_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__second_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the braced scope above always returns.
// Harmless artifact of the code generator; this file is auto-generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx is a typed alias of C->x used by the colscale template; the operator
// applied per entry is GB_BINOP (z = y, i.e. SECOND).
GrB_Info GB_AxD__second_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Cx is a typed alias of C->x used by the rowscale template; the operator
// applied per entry is GB_BINOP (z = y, i.e. SECOND).
GrB_Info GB_DxB__second_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// GB_FREE_ALL releases the ek_slice workspaces that the add template may
// allocate into the pointers declared below.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__second_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slice pointers, freed by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Same structure as GB_AaddB above, but using the emult template (set
// intersection of patterns rather than union).
GrB_Info GB_AemultB__second_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace slice pointers, freed by GB_FREE_ALL after the template runs
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = Bx [p] ;
Cx [p] = bij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For SECOND, op(aij,y) == y, so every present entry of C is set to the
// scalar y; Ax is never read (the empty `; ;` below is residue of the
// generator expanding the no-op GB_GETA macro).
GrB_Info GB_bind2nd__second_int16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present per the bitmap Ab (GBB tests presence)
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = y ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = aij ; \
}
GrB_Info (none)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// For SECOND this writes the scalar y into every transposed entry; the
// aij load was elided by the generator (the empty `; ;` below).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = y ; \
}
GrB_Info GB_bind2nd_tran__second_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_nested_loop2.c | #include <stdio.h>
#include <omp.h>
/*
 * Nested OpenMP demo: an outer parallel loop over i spawns an inner
 * parallel loop over j once nesting is enabled. Loop variables are
 * declared in the for-init, which makes them private by rule.
 */
int main()
{
    omp_set_num_threads(4);
    omp_set_nested(1);   /* allow the inner parallel region to fork */
    #pragma omp parallel for
    for (int i = 9; i > 6; i--) {
        #pragma omp parallel for
        for (int j = 0; j < 5; j++) {
            printf("[%d] (i,j=%d,%d)\n", omp_get_thread_num(), i, j);
        }
    }
    return 0;
}
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: the value operand applied by the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Per-pixel scratch record used by the evaluate/statistics threads:
   one double accumulator per possible pixel channel. */
typedef struct _PixelChannels
{
double
channel[MaxPixelChannels];
} PixelChannels;
/*
  Release the per-thread pixel workspace created by AcquirePixelThreadSet():
  free each row that was successfully allocated, then the row table itself.
  Always returns NULL so callers can reset their pointer in one statement.
*/
static PixelChannels **DestroyPixelThreadSet(const Image *images,
PixelChannels **pixels)
{
size_t
count;

ssize_t
n;

assert(pixels != (PixelChannels **) NULL);
/* same row count as at acquisition: one per worker thread at minimum */
count=MagickMax(GetImageListLength(images),(size_t)
GetMagickResourceLimit(ThreadResource));
for (n=0; n < (ssize_t) count; n++)
if (pixels[n] != (PixelChannels *) NULL)
pixels[n]=(PixelChannels *) RelinquishMagickMemory(pixels[n]);
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
/*
  Allocate one zero-initialized PixelChannels row per worker thread (at
  least one per image in the list). Each row is wide enough for the widest
  image in the list, and never narrower than the image count or the channel
  count. Returns NULL on allocation failure (partial rows are released via
  DestroyPixelThreadSet).
*/
static PixelChannels **AcquirePixelThreadSet(const Image *images)
{
const Image
*next;

PixelChannels
**pixels;

size_t
columns,
number_images,
rows;

ssize_t
i;

number_images=GetImageListLength(images);
rows=MagickMax(number_images,(size_t) GetMagickResourceLimit(ThreadResource));
pixels=(PixelChannels **) AcquireQuantumMemory(rows,sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) memset(pixels,0,rows*sizeof(*pixels));
/* row width: max of image count, channel count, and widest image */
columns=MagickMax(number_images,MaxPixelChannels);
for (next=images; next != (Image *) NULL; next=next->next)
columns=MagickMax(next->columns,columns);
for (i=0; i < (ssize_t) rows; i++)
{
ssize_t
j,
k;

pixels[i]=(PixelChannels *) AcquireQuantumMemory(columns,sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(images,pixels));
/* zero every channel accumulator of every column */
for (j=0; j < (ssize_t) columns; j++)
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
return(pixels);
}
/* Return the larger of two doubles; equal inputs return y (the x > y test). */
static inline double EvaluateMax(const double x,const double y)
{
return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/*
  qsort()-style comparator: orders two PixelChannels records by the signed
  sum of their per-channel differences (an intensity-like measure).
  Returns -1, 0, or 1.
*/
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*color_1,
*color_2;

double
sum;

ssize_t
i;

color_1=(const PixelChannels *) x;
color_2=(const PixelChannels *) y;
sum=0.0;
for (i=0; i < MaxPixelChannels; i++)
sum+=color_1->channel[i]-(double) color_2->channel[i];
if (sum < 0.0)
return(-1);
if (sum > 0.0)
return(1);
return(0);
}

#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
  ApplyEvaluateOperator() combines one pixel sample with the constant 'value'
  according to the evaluate operator 'op' and returns the (unclamped) result
  as a double.  random_info seeds the *Noise operators.  Mean/Median/Sum act
  like Add here; the caller performs any required post-processing (e.g.
  dividing a Mean accumulation).
*/
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
  const MagickEvaluateOperator op,const double value)
{
  double
    result;

  ssize_t
    i;

  result=0.0;
  switch (op)
  {
    case UndefinedEvaluateOperator:
      break;
    case AbsEvaluateOperator:
    {
      result=(double) fabs((double) (pixel+value));
      break;
    }
    case AddEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case AddModulusEvaluateOperator:
    {
      /*
        This returns a 'floored modulus' of the addition which is a positive
        result.  It differs from % or fmod() that returns a 'truncated modulus'
        result, where floor() is replaced by trunc() and could return a
        negative result (which is clipped).
      */
      result=pixel+value;
      result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
      break;
    }
    case AndEvaluateOperator:
    {
      /* Bitwise ops round value to the nearest integer first. */
      result=(double) ((ssize_t) pixel & (ssize_t) (value+0.5));
      break;
    }
    case CosineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case DivideEvaluateOperator:
    {
      /* Guard: a zero divisor degrades to division by 1.0. */
      result=pixel/(value == 0.0 ? 1.0 : value);
      break;
    }
    case ExponentialEvaluateOperator:
    {
      result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
      break;
    }
    case GaussianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,GaussianNoise,
        value);
      break;
    }
    case ImpulseNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
        value);
      break;
    }
    case InverseLogEvaluateOperator:
    {
      result=(QuantumRange*pow((value+1.0),QuantumScale*pixel)-1.0)*
        PerceptibleReciprocal(value);
      break;
    }
    case LaplacianNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        LaplacianNoise,value);
      break;
    }
    case LeftShiftEvaluateOperator:
    {
      /* Shift by doubling: value is used as an integer shift count. */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result*=2.0;
      break;
    }
    case LogEvaluateOperator:
    {
      /* Result stays 0.0 for samples below MagickEpsilon. */
      if ((QuantumScale*pixel) >= MagickEpsilon)
        result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
          1.0))/log((double) (value+1.0)));
      break;
    }
    case MaxEvaluateOperator:
    {
      result=(double) EvaluateMax((double) pixel,value);
      break;
    }
    case MeanEvaluateOperator:
    {
      /* Accumulate only; the caller divides by the sample count. */
      result=(double) (pixel+value);
      break;
    }
    case MedianEvaluateOperator:
    {
      /* Accumulate only; EvaluateImages() sorts and picks the middle. */
      result=(double) (pixel+value);
      break;
    }
    case MinEvaluateOperator:
    {
      result=(double) MagickMin((double) pixel,value);
      break;
    }
    case MultiplicativeNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,
        MultiplicativeGaussianNoise,value);
      break;
    }
    case MultiplyEvaluateOperator:
    {
      result=(double) (value*pixel);
      break;
    }
    case OrEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel | (ssize_t) (value+0.5));
      break;
    }
    case PoissonNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
        value);
      break;
    }
    case PowEvaluateOperator:
    {
      /* Preserve the sign for negative (HDRI) samples. */
      if (pixel < 0)
        result=(double) -(QuantumRange*pow((double) -(QuantumScale*pixel),
          (double) value));
      else
        result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),
          (double) value));
      break;
    }
    case RightShiftEvaluateOperator:
    {
      /* Shift by halving: value is used as an integer shift count. */
      result=(double) pixel;
      for (i=0; i < (ssize_t) value; i++)
        result/=2.0;
      break;
    }
    case RootMeanSquareEvaluateOperator:
    {
      /* Accumulate squares; the caller applies sqrt()/count. */
      result=((double) pixel*pixel+value);
      break;
    }
    case SetEvaluateOperator:
    {
      result=value;
      break;
    }
    case SineEvaluateOperator:
    {
      result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
        QuantumScale*pixel*value))+0.5));
      break;
    }
    case SubtractEvaluateOperator:
    {
      result=(double) (pixel-value);
      break;
    }
    case SumEvaluateOperator:
    {
      result=(double) (pixel+value);
      break;
    }
    case ThresholdEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
      break;
    }
    case ThresholdBlackEvaluateOperator:
    {
      result=(double) (((double) pixel <= value) ? 0 : pixel);
      break;
    }
    case ThresholdWhiteEvaluateOperator:
    {
      result=(double) (((double) pixel > value) ? QuantumRange : pixel);
      break;
    }
    case UniformNoiseEvaluateOperator:
    {
      result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
        value);
      break;
    }
    case XorEvaluateOperator:
    {
      result=(double) ((ssize_t) pixel ^ (ssize_t) (value+0.5));
      break;
    }
  }
  return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
  /*
    Clone a result canvas large enough for every image in the list: widest
    columns, tallest rows, and the image with the most channels serves as
    the clone template.
  */
  const Image
    *cur,
    *template_image;

  size_t
    height,
    width;

  template_image=images;
  width=images->columns;
  height=images->rows;
  for (cur=images; cur != (Image *) NULL; cur=cur->next)
  {
    if (cur->number_channels > template_image->number_channels)
      template_image=cur;
    if (cur->columns > width)
      width=cur->columns;
    if (cur->rows > height)
      height=cur->rows;
  }
  return(CloneImage(template_image,width,height,MagickTrue,exception));
}
/*
  EvaluateImages() applies the given operator across corresponding pixels of
  every image in the list and returns a single result image sized to the
  largest canvas.  The median operator is handled in a separate branch since
  it must sort the per-image samples for every pixel; all other operators
  accumulate per column.  Returns NULL (with an exception raised) on failure.

  Fix: the per-row pointer array 'p' was leaked when a row could not be
  acquired from an input image or the destination (the failure path
  'continue'd without releasing it); both branches now release 'p' first.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"

  CacheView
    *evaluate_view,
    **image_view;

  const Image
    *view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelChannels
    **magick_restrict evaluate_pixels;

  RandomInfo
    **magick_restrict random_info;

  size_t
    number_images;

  ssize_t
    n,
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Create the result canvas: largest geometry/channel count in the list.
  */
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  /*
    Per-thread scratch rows used to accumulate channel values.
  */
  evaluate_pixels=AcquirePixelThreadSet(images);
  if (evaluate_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    One virtual cache view per input image.
  */
  image_view=(CacheView **) AcquireQuantumMemory(number_images,
    sizeof(*image_view));
  if (image_view == (CacheView **) NULL)
    {
      image=DestroyImage(image);
      evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return(image);  /* image is the result of DestroyImage() here */
    }
  view=images;
  for (n=0; n < (ssize_t) number_images; n++)
  {
    image_view[n]=AcquireVirtualCacheView(view,exception);
    view=GetNextImageInList(view);
  }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        PixelChannels
          *evaluate_pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        /*
          Gather a source row pointer from every input image.
        */
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* Release the row pointer array before abandoning this row. */
            p=(const Quantum **) RelinquishMagickMemory((void *) p);
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          const Image
            *next;

          ssize_t
            i;

          /*
            Collect this pixel's samples from every image, then sort by
            intensity and write the middle sample.
          */
          next=images;
          for (j=0; j < (ssize_t) number_images; j++)
          {
            for (i=0; i < MaxPixelChannels; i++)
              evaluate_pixel[j].channel[i]=0.0;
            for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait) ||
                  ((traits & UpdatePixelTrait) == 0))
                continue;
              evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),op,
                evaluate_pixel[j].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
            next=GetNextImageInList(next);
          }
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[number_images/2].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory((void *) p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static) shared(progress,status) \
        magick_number_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        const Quantum
          **p;

        PixelChannels
          *evaluate_pixel;

        Quantum
          *magick_restrict q;

        ssize_t
          i,
          x;

        ssize_t
          j;

        if (status == MagickFalse)
          continue;
        p=(const Quantum **) AcquireQuantumMemory(number_images,sizeof(*p));
        if (p == (const Quantum **) NULL)
          {
            status=MagickFalse;
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              images->filename);
            continue;
          }
        for (j=0; j < (ssize_t) number_images; j++)
        {
          p[j]=GetCacheViewVirtualPixels(image_view[j],0,y,image->columns,1,
            exception);
          if (p[j] == (const Quantum *) NULL)
            break;
        }
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
          exception);
        if ((j < (ssize_t) number_images) || (q == (Quantum *) NULL))
          {
            /* Release the row pointer array before abandoning this row. */
            p=(const Quantum **) RelinquishMagickMemory((void *) p);
            status=MagickFalse;
            continue;
          }
        evaluate_pixel=evaluate_pixels[id];
        for (j=0; j < (ssize_t) image->columns; j++)
          for (i=0; i < MaxPixelChannels; i++)
            evaluate_pixel[j].channel[i]=0.0;
        /*
          Accumulate across the image list; the first image is always folded
          in with Add so the accumulator starts from its values.
        */
        next=images;
        for (j=0; j < (ssize_t) number_images; j++)
        {
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
            {
              PixelChannel channel = GetPixelChannelChannel(image,i);
              PixelTrait traits = GetPixelChannelTraits(next,channel);
              PixelTrait evaluate_traits = GetPixelChannelTraits(image,channel);
              if ((traits == UndefinedPixelTrait) ||
                  (evaluate_traits == UndefinedPixelTrait))
                continue;
              if ((traits & UpdatePixelTrait) == 0)
                continue;
              evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
                random_info[id],GetPixelChannel(next,channel,p[j]),j == 0 ?
                AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
            }
            p[j]+=GetPixelChannels(next);
          }
          next=GetNextImageInList(next);
        }
        /*
          Post-process accumulators for operators that need it.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          switch (op)
          {
            case MeanEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]/=(double) number_images;
              break;
            }
            case MultiplyEvaluateOperator:
            {
              /* Undo the extra QuantumRange factor per multiplication. */
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                for (j=0; j < (ssize_t) (number_images-1); j++)
                  evaluate_pixel[x].channel[i]*=QuantumScale;
              }
              break;
            }
            case RootMeanSquareEvaluateOperator:
            {
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
                evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
                  number_images);
              break;
            }
            default:
              break;
          }
        }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits == UndefinedPixelTrait) ||
                ((traits & UpdatePixelTrait) == 0))
              continue;
            q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
          }
          q+=GetPixelChannels(image);
        }
        p=(const Quantum **) RelinquishMagickMemory((void *) p);
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(images,EvaluateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  for (n=0; n < (ssize_t) number_images; n++)
    image_view[n]=DestroyCacheView(image_view[n]);
  image_view=(CacheView **) RelinquishMagickMemory(image_view);
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(images,evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImage() applies the evaluate operator with the given constant to
  every updatable channel of every pixel, in place.  The "evaluate:clamp"
  artifact selects ClampPixel() instead of ClampToQuantum() for the final
  conversion.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    clamp,
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  clamp=MagickFalse;
  artifact=GetImageArtifact(image,"evaluate:clamp");
  if (artifact != (const char *) NULL)
    clamp=IsStringTrue(artifact);
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): key == ~0UL appears to serialize the loop when the RNG is
     deterministically seeded — presumably for reproducible noise; confirm. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        result;

      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /* Skip channels that are undefined, copy-only, or not updatable. */
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
        /* Mean over (pixel, value) is their average. */
        if (op == MeanEvaluateOperator)
          result/=2.0;
        q[i]=clamp != MagickFalse ? ClampPixel(result) : ClampToQuantum(result);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EvaluateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FunctionImage() applies a value to the image with an arithmetic, relational,
%  or logical operator.  Use these operations to lighten or darken an image,
%  to increase or decrease contrast in an image, or to produce the "negative"
%  of an image.
%
% The format of the FunctionImage method is:
%
%      MagickBooleanType FunctionImage(Image *image,
%        const MagickFunction function,const size_t number_parameters,
%        const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() evaluates the given channel function for one pixel sample
  and returns the result clamped to the quantum range.  Missing parameters
  take the per-function defaults noted below.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  double
    result;

  ssize_t
    i;

  (void) exception;
  result=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
        c1*x^2+c2*x+c3).  Evaluated by Horner's method in QuantumScale*pixel.
      */
      result=0.0;
      for (i=0; i < (ssize_t) number_parameters; i++)
        result=result*QuantumScale*pixel+parameters[i];
      result*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      double
        amplitude,
        bias,
        frequency,
        phase;

      /*
        Sinusoid: frequency, phase, amplitude, bias.
      */
      frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
      phase=(number_parameters >= 2) ? parameters[1] : 0.0;
      amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      double
        bias,
        center,
        range,
        width;

      /*
        Arcsin (pegged at range limits for invalid results): width, center,
        range, and bias.
      */
      width=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=2.0*PerceptibleReciprocal(width)*(QuantumScale*pixel-center);
      if (result <= -1.0)
        result=bias-range/2.0;
      else
        if (result >= 1.0)
          result=bias+range/2.0;
        else
          result=(double) (range/MagickPI*asin((double) result)+bias);
      result*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      double
        center,
        bias,
        range,
        slope;

      /*
        Arctan: slope, center, range, and bias.
      */
      slope=(number_parameters >= 1) ? parameters[0] : 1.0;
      center=(number_parameters >= 2) ? parameters[1] : 0.5;
      range=(number_parameters >= 3) ? parameters[2] : 1.0;
      bias=(number_parameters >= 4) ? parameters[3] : 0.5;
      result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
      result=(double) (QuantumRange*(range/MagickPI*atan((double)
        result)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path; fall through to CPU on failure. */
  if (AccelerateFunctionImage(image,function,number_parameters,parameters,
      exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      /* Apply the function to every updatable channel of this pixel. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
          exception);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FunctionImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
  double *entropy,ExceptionInfo *exception)
{
  /*
    Report the composite-channel entropy computed by GetImageStatistics().
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *entropy=statistics[CompositePixelChannel].entropy;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  /*
    Round the floating-point channel range to integral extrema.
  */
  double
    maximum,
    minimum;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=GetImageRange(image,&minimum,&maximum,exception);
  *minima=(size_t) ceil(minimum-0.5);
  *maxima=(size_t) floor(maximum+0.5);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  /*
    Report composite-channel kurtosis and skewness from the image statistics.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *kurtosis=statistics[CompositePixelChannel].kurtosis;
  *skewness=statistics[CompositePixelChannel].skewness;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  /*
    Report composite-channel mean and standard deviation from the image
    statistics.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *mean=statistics[CompositePixelChannel].mean;
  *standard_deviation=statistics[CompositePixelChannel].standard_deviation;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e d i a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMedian() returns the median pixel of one or more image channels.
%
% The format of the GetImageMedian method is:
%
% MagickBooleanType GetImageMedian(const Image *image,double *median,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o median:  the median value of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMedian(const Image *image,double *median,
  ExceptionInfo *exception)
{
  /*
    Report the composite-channel median from the image statistics.
  */
  ChannelStatistics
    *statistics;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  statistics=GetImageStatistics(image,exception);
  if (statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  *median=statistics[CompositePixelChannel].median;
  statistics=(ChannelStatistics *) RelinquishMagickMemory(statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
  /*
    Count the image's defined, updatable channels; never returns less
    than 1 so callers may divide by the result.
  */
  size_t
    count;

  ssize_t
    i;

  count=0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits != UndefinedPixelTrait) &&
        ((traits & UpdatePixelTrait) != 0))
      count++;
  }
  return(count == 0 ? 1 : count);
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
channels,
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
c,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) memset(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) memset(centroid,0,sizeof(centroid));
(void) memset(M00,0,sizeof(M00));
(void) memset(M01,0,sizeof(M01));
(void) memset(M02,0,sizeof(M02));
(void) memset(M03,0,sizeof(M03));
(void) memset(M10,0,sizeof(M10));
(void) memset(M11,0,sizeof(M11));
(void) memset(M12,0,sizeof(M12));
(void) memset(M20,0,sizeof(M20));
(void) memset(M21,0,sizeof(M21));
(void) memset(M22,0,sizeof(M22));
(void) memset(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (c=0; c <= MaxPixelChannels; c++)
{
/*
Compute center of mass (centroid).
*/
centroid[c].x=M10[c]*PerceptibleReciprocal(M00[c]);
centroid[c].y=M01[c]*PerceptibleReciprocal(M00[c]);
}
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
channels=(double) GetImageChannels(image);
M00[MaxPixelChannels]/=channels;
M01[MaxPixelChannels]/=channels;
M02[MaxPixelChannels]/=channels;
M03[MaxPixelChannels]/=channels;
M10[MaxPixelChannels]/=channels;
M11[MaxPixelChannels]/=channels;
M12[MaxPixelChannels]/=channels;
M20[MaxPixelChannels]/=channels;
M21[MaxPixelChannels]/=channels;
M22[MaxPixelChannels]/=channels;
M30[MaxPixelChannels]/=channels;
for (c=0; c <= MaxPixelChannels; c++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[c].centroid=centroid[c];
channel_moments[c].ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00[c]))*
((M20[c]+M02[c])+sqrt(4.0*M11[c]*M11[c]+(M20[c]-M02[c])*(M20[c]-M02[c]))));
channel_moments[c].ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00[c]))*
((M20[c]+M02[c])-sqrt(4.0*M11[c]*M11[c]+(M20[c]-M02[c])*(M20[c]-M02[c]))));
channel_moments[c].ellipse_angle=RadiansToDegrees(1.0/2.0*atan(2.0*
M11[c]*PerceptibleReciprocal(M20[c]-M02[c])));
if (fabs(M11[c]) < 0.0)
{
if ((fabs(M20[c]-M02[c]) >= 0.0) &&
((M20[c]-M02[c]) < 0.0))
channel_moments[c].ellipse_angle+=90.0;
}
else
if (M11[c] < 0.0)
{
if (fabs(M20[c]-M02[c]) >= 0.0)
{
if ((M20[c]-M02[c]) < 0.0)
channel_moments[c].ellipse_angle+=90.0;
else
channel_moments[c].ellipse_angle+=180.0;
}
}
else
if ((fabs(M20[c]-M02[c]) >= 0.0) && ((M20[c]-M02[c]) < 0.0))
channel_moments[c].ellipse_angle+=90.0;
channel_moments[c].ellipse_eccentricity=sqrt(1.0-(
channel_moments[c].ellipse_axis.y*
channel_moments[c].ellipse_axis.y*PerceptibleReciprocal(
channel_moments[c].ellipse_axis.x*
channel_moments[c].ellipse_axis.x)));
channel_moments[c].ellipse_intensity=M00[c]*
PerceptibleReciprocal(MagickPI*channel_moments[c].ellipse_axis.x*
channel_moments[c].ellipse_axis.y+MagickEpsilon);
}
for (c=0; c <= MaxPixelChannels; c++)
{
/*
Normalize image moments.
*/
M10[c]=0.0;
M01[c]=0.0;
M11[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(1.0+1.0)/2.0));
M20[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(2.0+0.0)/2.0));
M02[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(0.0+2.0)/2.0));
M21[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(2.0+1.0)/2.0));
M12[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(1.0+2.0)/2.0));
M22[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(2.0+2.0)/2.0));
M30[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(3.0+0.0)/2.0));
M03[c]*=PerceptibleReciprocal(pow(M00[c],1.0+(0.0+3.0)/2.0));
M00[c]=1.0;
}
image_view=DestroyCacheView(image_view);
for (c=0; c <= MaxPixelChannels; c++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[c].invariant[0]=M20[c]+M02[c];
channel_moments[c].invariant[1]=(M20[c]-M02[c])*(M20[c]-M02[c])+4.0*M11[c]*
M11[c];
channel_moments[c].invariant[2]=(M30[c]-3.0*M12[c])*(M30[c]-3.0*M12[c])+
(3.0*M21[c]-M03[c])*(3.0*M21[c]-M03[c]);
channel_moments[c].invariant[3]=(M30[c]+M12[c])*(M30[c]+M12[c])+
(M21[c]+M03[c])*(M21[c]+M03[c]);
channel_moments[c].invariant[4]=(M30[c]-3.0*M12[c])*(M30[c]+M12[c])*
((M30[c]+M12[c])*(M30[c]+M12[c])-3.0*(M21[c]+M03[c])*(M21[c]+M03[c]))+
(3.0*M21[c]-M03[c])*(M21[c]+M03[c])*(3.0*(M30[c]+M12[c])*(M30[c]+M12[c])-
(M21[c]+M03[c])*(M21[c]+M03[c]));
channel_moments[c].invariant[5]=(M20[c]-M02[c])*((M30[c]+M12[c])*
(M30[c]+M12[c])-(M21[c]+M03[c])*(M21[c]+M03[c]))+4.0*M11[c]*
(M30[c]+M12[c])*(M21[c]+M03[c]);
channel_moments[c].invariant[6]=(3.0*M21[c]-M03[c])*(M30[c]+M12[c])*
((M30[c]+M12[c])*(M30[c]+M12[c])-3.0*(M21[c]+M03[c])*(M21[c]+M03[c]))-
(M30[c]-3*M12[c])*(M21[c]+M03[c])*(3.0*(M30[c]+M12[c])*(M30[c]+M12[c])-
(M21[c]+M03[c])*(M21[c]+M03[c]));
channel_moments[c].invariant[7]=M11[c]*((M30[c]+M12[c])*(M30[c]+M12[c])-
(M03[c]+M21[c])*(M03[c]+M21[c]))-(M20[c]-M02[c])*(M30[c]+M12[c])*
(M03[c]+M21[c]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     G e t I m a g e P e r c e p t u a l H a s h                             %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
  ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *perceptual_hash;
  char
    *colorspaces,
    *p,
    *q;
  const char
    *artifact;
  MagickBooleanType
    status;
  ssize_t
    i;
  /*
    Compute the perceptual hash: for each requested colorspace (the
    "phash:colorspaces" artifact, default "sRGB,HCLp"), blur the image,
    transform it to that colorspace, and record the negated log10 of the
    Hu image-moment invariants of every channel.
  */
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    MaxPixelChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    return((ChannelPerceptualHash *) NULL);
  artifact=GetImageArtifact(image,"phash:colorspaces");
  if (artifact != NULL)
    colorspaces=AcquireString(artifact);
  else
    colorspaces=AcquireString("sRGB,HCLp");
  perceptual_hash[0].number_colorspaces=0;
  perceptual_hash[0].number_channels=0;
  q=colorspaces;
  for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
  {
    ChannelMoments
      *moments;
    Image
      *hash_image;
    size_t
      j;
    ssize_t
      channel,
      colorspace;
    if (i >= MaximumNumberOfPerceptualColorspaces)
      break;
    colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
    if (colorspace < 0)
      break;
    perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
    hash_image=BlurImage(image,0.0,1.0,exception);
    if (hash_image == (Image *) NULL)
      break;
    hash_image->depth=8;
    status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
      exception);
    if (status == MagickFalse)
      {
        /*
          Bug fix: release the blurred image before bailing out; previously
          hash_image was leaked on a colorspace-transform failure.
        */
        hash_image=DestroyImage(hash_image);
        break;
      }
    moments=GetImageMoments(hash_image,exception);
    perceptual_hash[0].number_colorspaces++;
    perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
    hash_image=DestroyImage(hash_image);
    if (moments == (ChannelMoments *) NULL)
      break;
    for (channel=0; channel <= MaxPixelChannels; channel++)
      for (j=0; j < MaximumNumberOfImageMoments; j++)
        perceptual_hash[channel].phash[i][j]=
          (-MagickLog10(moments[channel].invariant[j]));
    moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  }
  colorspaces=DestroyString(colorspaces);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
  double *maxima,ExceptionInfo *exception)
{
  CacheView
    *image_view;
  MagickBooleanType
    initialize,
    status;
  ssize_t
    y;
  /*
    Scan every updatable channel of every pixel and return the global
    minimum and maximum sample values in *minima and *maxima.  Rows are
    processed in parallel; each row keeps local extrema that are merged
    under a critical section.  Returns MagickFalse if any row cannot be
    read from the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  initialize=MagickTrue;
  *maxima=0.0;
  *minima=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status,initialize) \
magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      row_maxima = 0.0,
      row_minima = 0.0;
    MagickBooleanType
      row_initialize;
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    row_initialize=MagickTrue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Seed the row extrema from the first qualifying sample so the
          initial 0.0 placeholders never leak into the result.
        */
        if (row_initialize != MagickFalse)
          {
            row_minima=(double) p[i];
            row_maxima=(double) p[i];
            row_initialize=MagickFalse;
          }
        else
          {
            if ((double) p[i] < row_minima)
              row_minima=(double) p[i];
            if ((double) p[i] > row_maxima)
              row_maxima=(double) p[i];
          }
      }
      p+=GetPixelChannels(image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageRange)
#endif
    {
      /*
        Merge this row's extrema into the shared result.
      */
      if (initialize != MagickFalse)
        {
          *minima=row_minima;
          *maxima=row_maxima;
          initialize=MagickFalse;
        }
      else
        {
          if (row_minima < *minima)
            *minima=row_minima;
          if (row_maxima > *maxima)
            *maxima=row_maxima;
        }
    }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t GetMedianPixel(Quantum *pixels,const size_t n)
{
#define SwapPixels(alpha,beta) \
{ \
  Quantum gamma=(alpha); \
  (alpha)=(beta);(beta)=gamma; \
}
  /*
    Return the index of the median of pixels[0..n-1] using an iterative
    quickselect with median-of-three pivoting.  The array is partially
    reordered in place; on return pixels[median] holds the median value.
  */
  ssize_t
    low = 0,
    high = (ssize_t) n-1,
    median = (low+high)/2;
  for ( ; ; )
  {
    ssize_t
      l = low+1,
      h = high,
      mid = (low+high)/2;
    if (high <= low)
      return(median);
    if (high == (low+1))
      {
        /*
          Two-element partition: order the pair and finish.
        */
        if (pixels[low] > pixels[high])
          SwapPixels(pixels[low],pixels[high]);
        return(median);
      }
    /*
      Median-of-three: order low/mid/high so pixels[low] becomes the pivot,
      then park the displaced element just after it.
    */
    if (pixels[mid] > pixels[high])
      SwapPixels(pixels[mid],pixels[high]);
    if (pixels[low] > pixels[high])
      SwapPixels(pixels[low], pixels[high]);
    if (pixels[mid] > pixels[low])
      SwapPixels(pixels[mid],pixels[low]);
    SwapPixels(pixels[mid],pixels[low+1]);
    /*
      Partition around the pivot at pixels[low].
    */
    for ( ; ; )
    {
      do l++; while (pixels[low] > pixels[l]);
      do h--; while (pixels[h] > pixels[low]);
      if (h < l)
        break;
      SwapPixels(pixels[l],pixels[h]);
    }
    SwapPixels(pixels[low],pixels[h]);
    /*
      Continue in whichever partition still contains the median index.
    */
    if (h <= median)
      low=l;
    if (h >= median)
      high=h-1;
  }
}
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;
  double
    area,
    channels,
    *histogram,
    standard_deviation;
  MagickStatusType
    status;
  MemoryInfo
    *median_info;
  Quantum
    *median;
  QuantumAny
    range;
  size_t
    depth;
  ssize_t
    i,
    y;
  /*
    Compute per-channel statistics: depth, minima/maxima, mean, standard
    deviation, skewness, kurtosis, entropy, and median.  The
    CompositePixelChannel slot aggregates across all channels.  Returns
    NULL on a memory-allocation failure or a pixel-cache read failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
    MaxPixelChannels+1,sizeof(*channel_statistics));
  if ((channel_statistics == (ChannelStatistics *) NULL) ||
      (histogram == (double *) NULL))
    {
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (channel_statistics != (ChannelStatistics *) NULL)
        channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          channel_statistics);
      return(channel_statistics);
    }
  (void) memset(channel_statistics,0,(MaxPixelChannels+1)*
    sizeof(*channel_statistics));
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Seed the extrema so the first sample always replaces them.
    */
    channel_statistics[i].depth=1;
    channel_statistics[i].maxima=(-MagickMaximumValue);
    channel_statistics[i].minima=MagickMaximumValue;
  }
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    /*
      Compute pixel statistics.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelReadMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
          {
            /*
              Grow the channel depth until the sample survives a
              round-trip at that depth, retrying the same sample (i--).
            */
            depth=channel_statistics[channel].depth;
            range=GetQuantumRange(depth);
            status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
              range) ? MagickTrue : MagickFalse;
            if (status != MagickFalse)
              {
                channel_statistics[channel].depth++;
                if (channel_statistics[channel].depth >
                    channel_statistics[CompositePixelChannel].depth)
                  channel_statistics[CompositePixelChannel].depth=
                    channel_statistics[channel].depth;
                i--;
                continue;
              }
          }
        if ((double) p[i] < channel_statistics[channel].minima)
          channel_statistics[channel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[channel].maxima)
          channel_statistics[channel].maxima=(double) p[i];
        channel_statistics[channel].sum+=p[i];
        channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
        channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
        channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
          p[i];
        channel_statistics[channel].area++;
        if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
          channel_statistics[CompositePixelChannel].minima=(double) p[i];
        if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
          channel_statistics[CompositePixelChannel].maxima=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum((double) p[i]))+i]++;
        channel_statistics[CompositePixelChannel].sum+=(double) p[i];
        channel_statistics[CompositePixelChannel].sum_squared+=(double)
          p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_cubed+=(double)
          p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
          p[i]*p[i]*p[i]*p[i];
        channel_statistics[CompositePixelChannel].area++;
      }
      p+=GetPixelChannels(image);
    }
  }
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Normalize pixel statistics: convert raw power sums into moments and
      compute the (bias-corrected) standard deviation.
    */
    area=PerceptibleReciprocal(channel_statistics[i].area);
    channel_statistics[i].sum*=area;
    channel_statistics[i].sum_squared*=area;
    channel_statistics[i].sum_cubed*=area;
    channel_statistics[i].sum_fourth_power*=area;
    channel_statistics[i].mean=channel_statistics[i].sum;
    channel_statistics[i].variance=channel_statistics[i].sum_squared;
    standard_deviation=sqrt(channel_statistics[i].variance-
      (channel_statistics[i].mean*channel_statistics[i].mean));
    standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
      1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
    channel_statistics[i].standard_deviation=standard_deviation;
  }
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      number_bins;
    ssize_t
      j;
    /*
      Compute pixel entropy, normalized by the number of occupied bins.
    */
    PixelChannel channel = GetPixelChannelChannel(image,i);
    number_bins=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
      if (histogram[GetPixelChannels(image)*j+i] > 0.0)
        number_bins++;
    area=PerceptibleReciprocal(channel_statistics[channel].area);
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        count;
      count=area*histogram[GetPixelChannels(image)*j+i];
      channel_statistics[channel].entropy+=-count*MagickLog10(count)*
        PerceptibleReciprocal(MagickLog10(number_bins));
      channel_statistics[CompositePixelChannel].entropy+=-count*
        MagickLog10(count)*PerceptibleReciprocal(MagickLog10(number_bins))/
        GetPixelChannels(image);
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
  {
    /*
      Compute kurtosis & skewness statistics.
    */
    standard_deviation=PerceptibleReciprocal(
      channel_statistics[i].standard_deviation);
    channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
      channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation);
    channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
      channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
      channel_statistics[i].mean*channel_statistics[i].mean*
      channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*1.0*channel_statistics[i].mean*
      channel_statistics[i].mean)*(standard_deviation*standard_deviation*
      standard_deviation*standard_deviation)-3.0;
  }
  median_info=AcquireVirtualMemory(image->columns,image->rows*sizeof(*median));
  if (median_info == (MemoryInfo *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  else
    {
      median=(Quantum *) GetVirtualMemoryBlob(median_info);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        size_t
          n = 0;
        /*
          Compute median statistics for each channel.
        */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          const Quantum
            *magick_restrict p;
          ssize_t
            x;
          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (GetPixelReadMask(image,p) <= (QuantumRange/2))
              {
                p+=GetPixelChannels(image);
                continue;
              }
            median[n++]=p[i];
            /*
              Bug fix: advance to the next pixel inside the column loop.
              The advance previously sat outside this loop, so only the
              first pixel of each row was (repeatedly) sampled for the
              median.
            */
            p+=GetPixelChannels(image);
          }
        }
        /*
          Guard n == 0 (e.g. a fully masked image) so we never select
          from an empty, uninitialized buffer.
        */
        if (n != 0)
          channel_statistics[channel].median=(double) median[
            GetMedianPixel(median,n)];
      }
      median_info=RelinquishVirtualMemory(median_info);
    }
  channel_statistics[CompositePixelChannel].mean=0.0;
  channel_statistics[CompositePixelChannel].median=0.0;
  channel_statistics[CompositePixelChannel].standard_deviation=0.0;
  channel_statistics[CompositePixelChannel].entropy=0.0;
  for (i=0; i < (ssize_t) MaxPixelChannels; i++)
  {
    channel_statistics[CompositePixelChannel].mean+=
      channel_statistics[i].mean;
    channel_statistics[CompositePixelChannel].median+=
      channel_statistics[i].median;
    channel_statistics[CompositePixelChannel].standard_deviation+=
      channel_statistics[i].standard_deviation;
    channel_statistics[CompositePixelChannel].entropy+=
      channel_statistics[i].entropy;
  }
  channels=(double) GetImageChannels(image);
  channel_statistics[CompositePixelChannel].mean/=channels;
  channel_statistics[CompositePixelChannel].median/=channels;
  channel_statistics[CompositePixelChannel].standard_deviation/=channels;
  channel_statistics[CompositePixelChannel].entropy/=channels;
  if (y < (ssize_t) image->rows)
    channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
      channel_statistics);
  return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
  CacheView
    *polynomial_view;
  Image
    *image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  PixelChannels
    **magick_restrict polynomial_pixels;
  size_t
    number_images;
  ssize_t
    y;
  /*
    Build a new image where each pixel is the sum over the image sequence
    of coefficient[j]*pow(sample_j,degree[j]); terms[] holds one
    (coefficient,degree) pair per image.  Rows are processed in parallel,
    each thread accumulating into its own PixelChannels scratch row.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImageCanvas(images,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  polynomial_pixels=AcquirePixelThreadSet(images);
  if (polynomial_pixels == (PixelChannels **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Polynomial image pixels.
  */
  status=MagickTrue;
  progress=0;
  polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    CacheView
      *image_view;
    const Image
      *next;
    const int
      id = GetOpenMPThreadId();
    PixelChannels
      *polynomial_pixel;
    Quantum
      *magick_restrict q;
    ssize_t
      i,
      j,
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    polynomial_pixel=polynomial_pixels[id];
    /*
      Zero this thread's accumulator row before summing terms.
    */
    for (j=0; j < (ssize_t) image->columns; j++)
      for (i=0; i < MaxPixelChannels; i++)
        polynomial_pixel[j].channel[i]=0.0;
    next=images;
    for (j=0; j < (ssize_t) number_images; j++)
    {
      const Quantum
        *p;
      /*
        Images beyond the supplied term list contribute nothing.
      */
      if (j >= (ssize_t) number_terms)
        continue;
      image_view=AcquireVirtualCacheView(next,exception);
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          image_view=DestroyCacheView(image_view);
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
        {
          MagickRealType
            coefficient,
            degree;
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(next,channel);
          PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
          if ((traits == UndefinedPixelTrait) ||
              (polynomial_traits == UndefinedPixelTrait))
            continue;
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          /*
            Accumulate coefficient*(normalized sample)^degree for term j;
            terms[] is laid out as coefficient,degree pairs.
          */
          coefficient=(MagickRealType) terms[2*j];
          degree=(MagickRealType) terms[(j << 1)+1];
          polynomial_pixel[x].channel[i]+=coefficient*
            pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
        }
        p+=GetPixelChannels(next);
      }
      image_view=DestroyCacheView(image_view);
      next=GetNextImageInList(next);
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Scale the accumulated polynomial back to quantum range.
        */
        q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
      status=MagickFalse;
    if (images->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(images,PolynomialImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  polynomial_view=DestroyCacheView(polynomial_view);
  polynomial_pixels=DestroyPixelThreadSet(images,polynomial_pixels);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  A node of the histogram skip-list: one per possible 16-bit sample value,
  plus a sentinel root at index 65536.  A node is "live" when its
  signature matches the owning PixelList's signature.
*/
typedef struct _SkipNode
{
  size_t
    next[9],
    count,
    signature;
} SkipNode;
/*
  Skip-list header: the current highest link level (0..8) and the node
  pool (65537 nodes, allocated in AcquirePixelList).
*/
typedef struct _SkipList
{
  ssize_t
    level;
  SkipNode
    *nodes;
} SkipList;
/*
  Per-thread pixel list used by StatisticImage: the neighborhood sample
  count (length), the PRNG seed for skip-list level selection, and the
  skip-list that keeps samples ordered by value.
*/
typedef struct _PixelList
{
  size_t
    length,
    seed;
  SkipList
    skip_list;
  size_t
    signature;
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release a pixel list and its skip-list node pool; NULL-safe, always
    returns NULL for convenient assignment.
  */
  if (pixel_list != (PixelList *) NULL)
    {
      if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
        pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
          pixel_list->skip_list.nodes);
      pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
    }
  return((PixelList *) NULL);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  ssize_t
    n;

  /*
    Destroy every per-thread pixel list, then the array itself.
  */
  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) memset((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) memset(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) memset(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
  SkipList
    *p;
  ssize_t
    level;
  size_t
    search,
    update[9];
  /*
    Initialize the node for this 16-bit color value with a count of one
    and mark it live by stamping the list's signature.
  */
  p=(&pixel_list->skip_list);
  p->nodes[color].signature=pixel_list->signature;
  p->nodes[color].count=1;
  /*
    Determine where it belongs in the list: descend from the top level,
    recording in update[] the last node visited per level (the nodes whose
    links must be patched).  65536 is the sentinel root.
  */
  search=65536UL;
  for (level=p->level; level >= 0; level--)
  {
    while (p->nodes[search].next[level] < color)
      search=p->nodes[search].next[level];
    update[level]=search;
  }
  /*
    Generate a pseudo-random level for this node (linear congruential
    sequence; capped below at 8 and at current level + 2).
  */
  for (level=0; ; level++)
  {
    pixel_list->seed=(pixel_list->seed*42893621L)+1L;
    if ((pixel_list->seed & 0x300) != 0x300)
      break;
  }
  if (level > 8)
    level=8;
  if (level > (p->level+2))
    level=p->level+2;
  /*
    If we're raising the list's level, link back to the root node.
  */
  while (level > p->level)
  {
    p->level++;
    update[p->level]=65536UL;
  }
  /*
    Link the node into the skip-list on every level up to its own.
  */
  do
  {
    p->nodes[color].next[level]=p->nodes[update[level]].next[level];
    p->nodes[update[level]].next[level]=color;
  } while (level-- > 0);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;
  size_t
    color;
  ssize_t
    count;
  /*
    Find the median value for each of the color: walk the level-0 links
    (ascending color order) from the sentinel root (65536), accumulating
    node counts until more than half of the samples have been passed.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;
  size_t
    color,
    max_count,
    mode;
  ssize_t
    count;
  /*
    Make each pixel the 'predominant color' of the specified neighborhood:
    scan all live nodes in ascending color order and keep the first color
    with the highest count (strict > means ties go to the lower color).
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  mode=color;
  max_count=p->nodes[mode].count;
  count=0;
  do
  {
    color=p->nodes[color].next[0];
    if (p->nodes[color].count > max_count)
      {
        mode=color;
        max_count=p->nodes[mode].count;
      }
    count+=p->nodes[color].count;
  } while (count < (ssize_t) pixel_list->length);
  *pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
  SkipList
    *p;
  size_t
    color,
    next,
    previous;
  ssize_t
    count;
  /*
    Finds the non peak value for each of the colors: walk to the median
    node, then—if the median sits at either end of the list (its neighbor
    on one side is the 65536 sentinel)—step off the peak toward the side
    that has a real neighbor.
  */
  p=(&pixel_list->skip_list);
  color=65536L;
  next=p->nodes[color].next[0];
  count=0;
  do
  {
    previous=color;
    color=next;
    next=p->nodes[color].next[0];
    count+=p->nodes[color].count;
  } while (count <= (ssize_t) (pixel_list->length >> 1));
  if ((previous == 65536UL) && (next != 65536UL))
    color=next;
  else
    if ((previous != 65536UL) && (next == 65536UL))
      color=previous;
  *pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
  /*
    Add one sample to the list: bump the count when the color is already
    present (its node signature matches the list's), otherwise link a
    fresh node into the skip-list.
  */
  size_t
    signature;

  unsigned short
    index;

  index=ScaleQuantumToShort(pixel);
  signature=pixel_list->skip_list.nodes[index].signature;
  if (signature != pixel_list->signature)
    {
      AddNodePixelList(pixel_list,index);
      return;
    }
  pixel_list->skip_list.nodes[index].count++;
}
static void ResetPixelList(PixelList *pixel_list)
{
  /*
    Empty the skip-list in O(1): point every level of the sentinel node
    back at itself and advance the signature so nodes from the previous
    neighborhood are treated as stale.
  */
  int
    level;

  SkipList
    *list;

  SkipNode
    *sentinel;

  list=(&pixel_list->skip_list);
  sentinel=list->nodes+65536UL;
  list->level=0;
  for (level=0; level < 9; level++)
    sentinel->next[level]=65536UL;
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **magick_restrict pixel_list;

  ssize_t
    center,
    y;

  /*
    Initialize statistics image attributes.  Each output pixel becomes the
    requested statistic (min/max/mean/median/mode/...) of the width x height
    neighborhood centered on the corresponding input pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  statistic_image=CloneImage(image,0,0,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(statistic_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /*
    One skip-list pixel list per OpenMP thread; a zero width/height is
    promoted to 1 throughout.
  */
  pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
    'center' is the channel offset, within the padded virtual-pixel window
    fetched per row, of the pixel at the center of the neighborhood.
  */
  center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
    (MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the input row padded by half the neighborhood on every side
      (virtual pixels supply the out-of-bounds values).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
      (ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
      MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns, 1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          area,
          maximum,
          minimum,
          sum,
          sum_squared;

        Quantum
          pixel;

        const Quantum
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (statistic_traits == UndefinedPixelTrait))
          continue;
        /*
          Copy-through channels and write-masked pixels pass the center
          value unchanged.
        */
        if (((statistic_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) <= (QuantumRange/2)))
          {
            SetPixelChannel(statistic_image,channel,p[center+i],q);
            continue;
          }
        if ((statistic_traits & UpdatePixelTrait) == 0)
          continue;
        pixels=p;
        area=0.0;
        minimum=pixels[i];
        maximum=pixels[i];
        sum=0.0;
        sum_squared=0.0;
        ResetPixelList(pixel_list[id]);
        /*
          Accumulate over the neighborhood: order statistics go into the
          skip-list, everything else into running min/max/sum/sum^2.
        */
        for (v=0; v < (ssize_t) MagickMax(height,1); v++)
        {
          for (u=0; u < (ssize_t) MagickMax(width,1); u++)
          {
            if ((type == MedianStatistic) || (type == ModeStatistic) ||
                (type == NonpeakStatistic))
              {
                InsertPixelList(pixels[i],pixel_list[id]);
                pixels+=GetPixelChannels(image);
                continue;
              }
            area++;
            if (pixels[i] < minimum)
              minimum=(double) pixels[i];
            if (pixels[i] > maximum)
              maximum=(double) pixels[i];
            sum+=(double) pixels[i];
            sum_squared+=(double) pixels[i]*pixels[i];
            pixels+=GetPixelChannels(image);
          }
          /* advance to the same column in the next padded row */
          pixels+=GetPixelChannels(image)*image->columns;
        }
        switch (type)
        {
          case ContrastStatistic:
          {
            pixel=ClampToQuantum(MagickAbsoluteValue((maximum-minimum)*
              PerceptibleReciprocal(maximum+minimum)));
            break;
          }
          case GradientStatistic:
          {
            pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
            break;
          }
          case MaximumStatistic:
          {
            pixel=ClampToQuantum(maximum);
            break;
          }
          case MeanStatistic:
          default:
          {
            /* MeanStatistic also handles any unrecognized statistic type */
            pixel=ClampToQuantum(sum/area);
            break;
          }
          case MedianStatistic:
          {
            GetMedianPixelList(pixel_list[id],&pixel);
            break;
          }
          case MinimumStatistic:
          {
            pixel=ClampToQuantum(minimum);
            break;
          }
          case ModeStatistic:
          {
            GetModePixelList(pixel_list[id],&pixel);
            break;
          }
          case NonpeakStatistic:
          {
            GetNonpeakPixelList(pixel_list[id],&pixel);
            break;
          }
          case RootMeanSquareStatistic:
          {
            pixel=ClampToQuantum(sqrt(sum_squared/area));
            break;
          }
          case StandardDeviationStatistic:
          {
            pixel=ClampToQuantum(sqrt(sum_squared/area-(sum/area*sum/area)));
            break;
          }
        }
        SetPixelChannel(statistic_image,channel,pixel,q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(statistic_image);
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,StatisticImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
lagrange_dual.h | /*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_SOLVER_LAGRANGE_DUAL_LAGRANGE_DUAL_H__
#define PRINTEMPS_SOLVER_LAGRANGE_DUAL_LAGRANGE_DUAL_H__
#include "lagrange_dual_print.h"
#include "lagrange_dual_termination_status.h"
#include "lagrange_dual_result.h"
namespace printemps {
namespace solver {
namespace lagrange_dual {
/*****************************************************************************/
template <class T_Variable, class T_Expression>
void bound_dual(
    model::Model<T_Variable, T_Expression>*       a_model_ptr,
    std::vector<multi_array::ValueProxy<double>>* a_dual_value_proxies) {
    /**
     * Project each Lagrange multiplier onto its feasible half-line:
     * nonnegative for Less constraints, nonpositive for Greater
     * constraints, and unrestricted for Equal constraints.
     */
    for (auto&& proxy : a_model_ptr->constraint_proxies()) {
        for (auto&& constraint : proxy.flat_indexed_constraints()) {
            const int PROXY_INDEX = constraint.proxy_index();
            const int FLAT_INDEX  = constraint.flat_index();
            auto&     multiplier =
                (*a_dual_value_proxies)[PROXY_INDEX].flat_indexed_values(
                    FLAT_INDEX);

            const auto SENSE = constraint.sense();
            if (SENSE == model_component::ConstraintSense::Less) {
                multiplier = std::max(multiplier, 0.0);
            } else if (SENSE == model_component::ConstraintSense::Greater) {
                multiplier = std::min(multiplier, 0.0);
            }
            /// Equal and any other senses: the multiplier is left as-is.
        }
    }
}
/*****************************************************************************/
template <class T_Variable, class T_Expression>
LagrangeDualResult<T_Variable, T_Expression> solve(
    model::Model<T_Variable, T_Expression>* a_model_ptr,      //
    const option::Option&                   a_OPTION,         //
    const std::vector<multi_array::ValueProxy<T_Variable>>&   //
        a_INITIAL_VARIABLE_VALUE_PROXIES,                     //
    const solution::IncumbentHolder<T_Variable, T_Expression>&  //
        a_INCUMBENT_HOLDER) {
    /**
     * Lagrange dual (subgradient) method: alternately (1) take a
     * subgradient step on the dual multipliers, (2) minimize the
     * lagrangian over the primal variables bound-by-bound, and (3) adapt
     * the step size from a sliding window of recent lagrangian values.
     * Returns the best lagrangian bound found, the matching primal
     * solution, the dual multipliers, and bookkeeping for the caller.
     */

    /**
     * Define type aliases.
     */
    using Model_T  = model::Model<T_Variable, T_Expression>;
    using Result_T = LagrangeDualResult<T_Variable, T_Expression>;
    using IncumbentHolder_T =
        solution::IncumbentHolder<T_Variable, T_Expression>;

    /**
     * Start to measure computational time.
     */
    utility::TimeKeeper time_keeper;

    /**
     * Copy arguments as local variables.
     */
    Model_T*          model_ptr        = a_model_ptr;
    option::Option    option           = a_OPTION;
    IncumbentHolder_T incumbent_holder = a_INCUMBENT_HOLDER;

    /**
     * Reset the local augmented incumbent.
     */
    incumbent_holder.reset_local_augmented_incumbent();

    model_ptr->import_variable_values(a_INITIAL_VARIABLE_VALUE_PROXIES);
    model_ptr->update();

    /**
     * Initialize the solution and update the model.
     */
    solution::SolutionScore solution_score = model_ptr->evaluate({});

    int update_status =
        incumbent_holder.try_update_incumbent(model_ptr, solution_score);
    int total_update_status = 0;

    /**
     * Prepare the primal solution.
     */
    auto primal_incumbent = model_ptr->export_solution();

    /**
     * Prepare the dual solution as lagrange multipliers.
     */
    std::vector<multi_array::ValueProxy<double>> dual_value_proxies =
        model_ptr->generate_constraint_parameter_proxies(0.0);
    bound_dual(model_ptr, &dual_value_proxies);

    auto dual_value_proxies_incumbent = dual_value_proxies;

    /**
     * Prepare the lagrangian incumbent and its queue.
     */
    double lagrangian_incumbent = -HUGE_VALF;

    utility::FixedSizeQueue<double> queue(option.lagrange_dual.queue_size);

    /**
     * Prepare the step size for subgradient algorithm.
     */
    double step_size = 1.0 / model_ptr->number_of_variables();

    /**
     * Prepare feasible solutions holder.
     */
    std::vector<solution::SparseSolution<T_Variable, T_Expression>>
        feasible_solutions;

    /**
     * Prepare other local variables.
     */
    LagrangeDualTerminationStatus termination_status =
        LagrangeDualTerminationStatus::ITERATION_OVER;

    /**
     * Print the header of optimization progress table and print the
     * initial solution status.
     */
    utility::print_single_line(option.verbose >= option::verbose::Full);
    utility::print_message("Lagrange dual starts.",
                           option.verbose >= option::verbose::Full);
    print_table_header(option.verbose >= option::verbose::Full);
    print_table_initial(model_ptr,       //
                        -HUGE_VALF,      //
                        step_size,       //
                        solution_score,  //
                        incumbent_holder,
                        option.verbose >= option::verbose::Full);

    /**
     * Iterations start.
     */
    int iteration = 0;

    auto variable_ptrs   = model_ptr->variable_reference().variable_ptrs;
    auto constraint_ptrs = model_ptr->constraint_reference().constraint_ptrs;

    while (true) {
        /**
         * Check the terminating condition.
         */
        double elapsed_time = time_keeper.clock();
        if (elapsed_time > option.lagrange_dual.time_max) {
            termination_status = LagrangeDualTerminationStatus::TIME_OVER;
            break;
        }

        if (elapsed_time + option.lagrange_dual.time_offset > option.time_max) {
            termination_status = LagrangeDualTerminationStatus::TIME_OVER;
            break;
        }

        if (iteration >= option.lagrange_dual.iteration_max) {
            termination_status = LagrangeDualTerminationStatus::ITERATION_OVER;
            break;
        }

        if (incumbent_holder.feasible_incumbent_objective() <=
            option.target_objective_value) {
            termination_status = LagrangeDualTerminationStatus::REACH_TARGET;
            break;
        }

        /**
         * Update the dual solution: one subgradient step, where the
         * subgradient component for each constraint is its violation value.
         */
        const int CONSTRAINTS_SIZE = constraint_ptrs.size();
#ifdef _OPENMP
#pragma omp parallel for if (option.is_enabled_parallel_evaluation) \
    schedule(static)
#endif
        for (auto i = 0; i < CONSTRAINTS_SIZE; i++) {
            double constraint_value = constraint_ptrs[i]->constraint_value();
            int    proxy_index      = constraint_ptrs[i]->proxy_index();
            int    flat_index       = constraint_ptrs[i]->flat_index();

            dual_value_proxies[proxy_index].flat_indexed_values(flat_index) +=
                step_size * constraint_value;
        }

        /**
         * Bound the values of dual solution.
         */
        bound_dual(model_ptr, &dual_value_proxies);

        /**
         * Update the primal optimal solution so that it minimizes lagrangian
         * for the updated dual solution: each free variable goes to the bound
         * favored by the sign of its reduced cost.
         */
        const int VARIABLES_SIZE = variable_ptrs.size();
#ifdef _OPENMP
#pragma omp parallel for if (option.is_enabled_parallel_evaluation) \
    schedule(static)
#endif
        for (auto i = 0; i < VARIABLES_SIZE; i++) {
            if (variable_ptrs[i]->is_fixed()) {
                continue;
            }
            double coefficient = variable_ptrs[i]->objective_sensitivity();

            for (auto&& item : variable_ptrs[i]->constraint_sensitivities()) {
                const auto& constraint_ptr = item.first;
                double      sensitivity    = item.second;

                int proxy_index = constraint_ptr->proxy_index();
                int flat_index  = constraint_ptr->flat_index();

                coefficient +=
                    dual_value_proxies[proxy_index].flat_indexed_values(
                        flat_index) *
                    sensitivity * model_ptr->sign();
            }

            if (coefficient > 0) {
                if (model_ptr->is_minimization()) {
                    variable_ptrs[i]->set_value_if_mutable(
                        variable_ptrs[i]->lower_bound());
                } else {
                    variable_ptrs[i]->set_value_if_mutable(
                        variable_ptrs[i]->upper_bound());
                }
            } else {
                if (model_ptr->is_minimization()) {
                    variable_ptrs[i]->set_value_if_mutable(
                        variable_ptrs[i]->upper_bound());
                } else {
                    variable_ptrs[i]->set_value_if_mutable(
                        variable_ptrs[i]->lower_bound());
                }
            }
        }

        /**
         * Update the model.
         */
        model_ptr->update();
        solution_score = model_ptr->evaluate({});

        update_status =
            incumbent_holder.try_update_incumbent(model_ptr, solution_score);
        total_update_status = update_status || total_update_status;

        /**
         * Store the current feasible solution.
         */
        if (solution_score.is_feasible) {
            feasible_solutions.push_back(model_ptr->export_plain_solution());
        }

        /**
         * Compute the lagrangian value.
         */
        double lagrangian = model_ptr->compute_lagrangian(dual_value_proxies) *
                            model_ptr->sign();

        /**
         * Update the lagrangian incumbent.
         */
        if (lagrangian > lagrangian_incumbent) {
            lagrangian_incumbent = lagrangian;
            /**
             * BUGFIX: this assignment previously declared a new local
             * (auto primal_incumbent = ...) that shadowed the outer
             * primal_incumbent, so the best primal solution was silently
             * discarded and the result always carried the initial solution.
             */
            primal_incumbent             = model_ptr->export_solution();
            dual_value_proxies_incumbent = dual_value_proxies;
        }

        /**
         * Update the lagrangian queue.
         */
        queue.push(lagrangian);
        double queue_average = queue.average();
        double queue_max     = queue.max();

        /**
         * Adjust the step size.
         */
        if (queue.size() > 0) {
            if (lagrangian > queue_average) {
                step_size *= option.lagrange_dual.step_size_extend_rate;
            }
            if (lagrangian < queue_max) {
                step_size *= option.lagrange_dual.step_size_reduce_rate;
            }
        }

        /**
         * Print the optimization progress.
         */
        if (iteration % std::max(option.lagrange_dual.log_interval, 1) == 0 ||
            update_status > 1) {
            print_table_body(model_ptr,         //
                             iteration,         //
                             lagrangian,        //
                             step_size,         //
                             solution_score,    //
                             update_status,     //
                             incumbent_holder,  //
                             option.verbose >= option::verbose::Full);
        }

        /**
         * Terminate the loop if lagrangian converges.
         */
        if (queue.size() == option.lagrange_dual.queue_size &&
            fabs(lagrangian - queue_average) <
                std::max(1.0, fabs(queue_average)) *
                    option.lagrange_dual.tolerance) {
            termination_status = LagrangeDualTerminationStatus::CONVERGE;
            break;
        }

        iteration++;
    }

    /**
     * Print the footer of the optimization progress table.
     */
    print_table_footer(option.verbose >= option::verbose::Full);

    /**
     * Prepare the result.
     */
    Result_T result;
    result.lagrangian           = lagrangian_incumbent;
    result.primal_solution      = primal_incumbent;
    result.dual_value_proxies   = dual_value_proxies_incumbent;
    result.incumbent_holder     = incumbent_holder;
    result.total_update_status  = total_update_status;
    result.number_of_iterations = iteration;
    result.termination_status   = termination_status;
    result.feasible_solutions   = feasible_solutions;

    return result;
}
} // namespace lagrange_dual
} // namespace solver
} // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/ |
BsplineFunctor.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: John R. Gergely, University of Illinois at Urbana-Champaign
// Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
// Miguel Morales, moralessilva2@llnl.gov, Lawrence Livermore National Laboratory
// Raymond Clay III, j.k.rofling@gmail.com, Lawrence Livermore National Laboratory
// Jeremy McMinnis, jmcminis@gmail.com, University of Illinois at Urbana-Champaign
// Jeongnim Kim, jeongnim.kim@gmail.com, University of Illinois at Urbana-Champaign
// Jaron T. Krogel, krogeljt@ornl.gov, Oak Ridge National Laboratory
// Mark A. Berrill, berrillma@ornl.gov, Oak Ridge National Laboratory
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Ken Esler, kpesler@gmail.com, University of Illinois at Urbana-Champaign
//////////////////////////////////////////////////////////////////////////////////////
#ifndef QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#define QMCPLUSPLUS_BSPLINE_FUNCTOR_H
#include "Numerics/OptimizableFunctorBase.h"
#include "Utilities/ProgressReportEngine.h"
#include "OhmmsData/AttributeSet.h"
#include "Numerics/LinearFit.h"
#include "simd/allocator.hpp"
#include <cstdio>
namespace qmcplusplus
{
template<class T>
struct BsplineFunctor: public OptimizableFunctorBase
{
typedef real_type value_type;
int NumParams;
int Dummy;
const real_type A[16], dA[16], d2A[16], d3A[16];
aligned_vector<real_type> SplineCoefs;
//static const real_type A[16], dA[16], d2A[16];
real_type DeltaR, DeltaRInv;
real_type CuspValue;
real_type Y, dY, d2Y;
// Stores the derivatives w.r.t. SplineCoefs
// of the u, du/dr, and d2u/dr2
std::vector<TinyVector<real_type,3> > SplineDerivs;
std::vector<real_type> Parameters;
std::vector<std::string> ParameterNames;
std::string elementType, pairType;
std::string fileName;
int ResetCount;
bool notOpt;
bool periodic;
///constructor
// Construct an empty functor; call resize()/put()/initialize() before use.
// The 4x4 matrices below are the fixed cubic B-spline basis (A) and its
// first (dA), second (d2A), and third (d3A) derivative coefficients,
// applied to the monomial vector [t^3, t^2, t, 1] in evaluate().
BsplineFunctor(real_type cusp=0.0) :
  NumParams(0),
  A{-1.0/6.0, 3.0/6.0, -3.0/6.0, 1.0/6.0,
     3.0/6.0, -6.0/6.0, 0.0/6.0, 4.0/6.0,
    -3.0/6.0, 3.0/6.0, 3.0/6.0, 1.0/6.0,
     1.0/6.0, 0.0/6.0, 0.0/6.0, 0.0/6.0},
  dA{0.0, -0.5, 1.0, -0.5,
     0.0, 1.5, -2.0, 0.0,
     0.0, -1.5, 1.0, 0.5,
     0.0, 0.5, 0.0, 0.0},
  d2A{0.0, 0.0, -1.0, 1.0,
      0.0, 0.0, 3.0, -2.0,
      0.0, 0.0, -3.0, 1.0,
      0.0, 0.0, 1.0, 0.0},
  d3A{0.0, 0.0, 0.0, -1.0,
      0.0, 0.0, 0.0, 3.0,
      0.0, 0.0, 0.0, -3.0,
      0.0, 0.0, 0.0, 1.0},
  CuspValue(cusp), ResetCount(0), notOpt(false), periodic(true)
{
  // cutoff_radius (from OptimizableFunctorBase) is set later by put()
  // or initialize(); zero means "not yet configured".
  cutoff_radius = 0.0;
}
OptimizableFunctorBase* makeClone() const
{
  // Polymorphic copy: duplicate this functor via its copy constructor.
  BsplineFunctor* clone = new BsplineFunctor(*this);
  return clone;
}
// Set the cusp value (du/dr at the origin) enforced by reset().
void setCusp(real_type c)
{
  CuspValue = c;
}
// Flag periodic boundary conditions; put() requires an explicit cutoff
// when this is false.
void setPeriodic(bool p)
{
  periodic = p;
}
void resize(int n)
{
  // (Re)allocate storage for n optimizable parameters: the spline keeps
  // n+4 coefficients over n+2 knots spanning [0, cutoff_radius].
  NumParams = n;
  const int coef_count = NumParams + 4;
  const int knot_count = coef_count - 2;
  DeltaR    = cutoff_radius / (real_type)(knot_count - 1);
  DeltaRInv = 1.0 / DeltaR;
  Parameters.resize(n);
  SplineCoefs.resize(coef_count);
  SplineDerivs.resize(coef_count);
}
void reset()
{
int numCoefs = NumParams + 4;
int numKnots = numCoefs - 2;
DeltaR = cutoff_radius / (real_type)(numKnots - 1);
DeltaRInv = 1.0/DeltaR;
for (int i=0; i<SplineCoefs.size(); i++)
SplineCoefs[i] = 0.0;
// Ensure that cusp conditions is satisfied at the origin
SplineCoefs[1] = Parameters[0];
SplineCoefs[2] = Parameters[1];
SplineCoefs[0] = Parameters[1] - 2.0*DeltaR * CuspValue;
for (int i=2; i<Parameters.size(); i++)
SplineCoefs[i+1] = Parameters[i];
}
/** compute value, gradient and laplacian for [iStart, iEnd) pairs
 * @param iat dummy
 * @param iStart starting particle index
 * @param iEnd ending particle index
 * @param _distArray distance array
 * @param _valArray u(r_j) for j=[iStart,iEnd)
 * @param _gradArray du(r_j)/dr /r_j for j=[iStart,iEnd)
 * @param _laplArray d2u(r_j)/dr2 for j=[iStart,iEnd)
 * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
 * @param distIndices temp storage for the compressed index
 */
void evaluateVGL(const int iat, const int iStart, const int iEnd,
                 const T* _distArray,
                 T* restrict _valArray,
                 T* restrict _gradArray,
                 T* restrict _laplArray,
                 T* restrict distArrayCompressed, int* restrict distIndices ) const;

/** evaluate sum of the pair potentials for [iStart,iEnd)
 * @param iat dummy
 * @param iStart starting particle index
 * @param iEnd ending particle index
 * @param _distArray distance array
 * @param distArrayCompressed temp storage to filter r_j < cutoff_radius
 * @return \f$\sum u(r_j)\f$ for r_j < cutoff_radius
 */
T evaluateV(const int iat, const int iStart, const int iEnd,
            const T* restrict _distArray,
            T* restrict distArrayCompressed) const;
inline real_type evaluate(real_type r)
{
  // u(r): cubic B-spline value; identically zero at and beyond the cutoff.
  if (r >= cutoff_radius)
    return 0.0;
  // Split the scaled distance into knot index i and fractional offset t.
  real_type ipart;
  const real_type t  = std::modf(r * DeltaRInv, &ipart);
  const int       i  = (int)ipart;
  const real_type t2 = t * t;
  const real_type t3 = t2 * t;
  // Dot the four local coefficients with the basis rows applied to
  // [t^3, t^2, t, 1].
  return SplineCoefs[i + 0] * (A[ 0] * t3 + A[ 1] * t2 + A[ 2] * t + A[ 3]) +
         SplineCoefs[i + 1] * (A[ 4] * t3 + A[ 5] * t2 + A[ 6] * t + A[ 7]) +
         SplineCoefs[i + 2] * (A[ 8] * t3 + A[ 9] * t2 + A[10] * t + A[11]) +
         SplineCoefs[i + 3] * (A[12] * t3 + A[13] * t2 + A[14] * t + A[15]);
}
inline real_type evaluate(real_type r, real_type rinv)
{
  // Convenience overload: caches value and derivatives in Y, dY, d2Y.
  Y = evaluate(r, dY, d2Y);
  return Y;
}
// Evaluate value and first/second derivatives, caching them in Y, dY, d2Y.
inline void evaluateAll(real_type r, real_type rinv)
{
  Y=evaluate(r,dY,d2Y);
}
/** Evaluate u(r) together with du/dr and d2u/dr2.
 * All three are zero at and beyond the cutoff.  The chain-rule factors
 * DeltaRInv (resp. DeltaRInv^2) convert derivatives with respect to the
 * scaled coordinate t into derivatives with respect to r.
 */
inline real_type
evaluate(real_type r, real_type& dudr, real_type& d2udr2)
{
  if (r >= cutoff_radius)
  {
    dudr = d2udr2 = 0.0;
    return 0.0;
  }
  // Finite-difference checks kept for debugging:
  // real_type eps = 1.0e-5;
  // real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
  // real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
  // Split the scaled distance into knot index i and fractional offset t.
  r *= DeltaRInv;
  real_type ipart, t;
  t = std::modf(r, &ipart);
  int i = (int) ipart;
  real_type tp[4];
  tp[0] = t*t*t;
  tp[1] = t*t;
  tp[2] = t;
  tp[3] = 1.0;
  d2udr2 = DeltaRInv * DeltaRInv *
           (SplineCoefs[i+0]*(d2A[ 0]*tp[0] + d2A[ 1]*tp[1] + d2A[ 2]*tp[2] + d2A[ 3]*tp[3])+
            SplineCoefs[i+1]*(d2A[ 4]*tp[0] + d2A[ 5]*tp[1] + d2A[ 6]*tp[2] + d2A[ 7]*tp[3])+
            SplineCoefs[i+2]*(d2A[ 8]*tp[0] + d2A[ 9]*tp[1] + d2A[10]*tp[2] + d2A[11]*tp[3])+
            SplineCoefs[i+3]*(d2A[12]*tp[0] + d2A[13]*tp[1] + d2A[14]*tp[2] + d2A[15]*tp[3]));
  dudr = DeltaRInv *
         (SplineCoefs[i+0]*(dA[ 0]*tp[0] + dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3])+
          SplineCoefs[i+1]*(dA[ 4]*tp[0] + dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3])+
          SplineCoefs[i+2]*(dA[ 8]*tp[0] + dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3])+
          SplineCoefs[i+3]*(dA[12]*tp[0] + dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]));
  // if (std::abs(dudr_FD-dudr) > 1.0e-8)
  //   std::cerr << "Error in BsplineFunction: dudr = " << dudr
  //             << " dudr_FD = " << dudr_FD << std::endl;
  // if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
  //   std::cerr << "Error in BsplineFunction: r = " << r << " d2udr2 = " << dudr
  //             << " d2udr2_FD = " << d2udr2_FD << " rcut = " << cutoff_radius << std::endl;
  return
    (SplineCoefs[i+0]*(A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3])+
     SplineCoefs[i+1]*(A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3])+
     SplineCoefs[i+2]*(A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3])+
     SplineCoefs[i+3]*(A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3]));
}
/** Evaluate u(r) together with its first, second, and third derivatives.
 * All outputs are zero at and beyond the cutoff.  Each derivative order
 * carries one more chain-rule factor of DeltaRInv.
 */
inline real_type
evaluate(real_type r, real_type& dudr, real_type& d2udr2, real_type &d3udr3)
{
  if (r >= cutoff_radius)
  {
    dudr = d2udr2 = d3udr3 = 0.0;
    return 0.0;
  }
  // Finite-difference checks kept for debugging:
  // real_type eps = 1.0e-5;
  // real_type dudr_FD = (evaluate(r+eps)-evaluate(r-eps))/(2.0*eps);
  // real_type d2udr2_FD = (evaluate(r+eps)+evaluate(r-eps)-2.0*evaluate(r))/(eps*eps);
  // real_type d3udr3_FD = (-1.0*evaluate(r+1.0*eps)
  //                        +2.0*evaluate(r+0.5*eps)
  //                        -2.0*evaluate(r-0.5*eps)
  //                        +1.0*evaluate(r-1.0*eps))/(eps*eps*eps);
  // Split the scaled distance into knot index i and fractional offset t.
  r *= DeltaRInv;
  real_type ipart, t;
  t = std::modf(r, &ipart);
  int i = (int) ipart;
  real_type tp[4];
  tp[0] = t*t*t;
  tp[1] = t*t;
  tp[2] = t;
  tp[3] = 1.0;
  d3udr3 = DeltaRInv * DeltaRInv * DeltaRInv *
           (SplineCoefs[i+0]*(d3A[ 0]*tp[0] + d3A[ 1]*tp[1] + d3A[ 2]*tp[2] + d3A[ 3]*tp[3])+
            SplineCoefs[i+1]*(d3A[ 4]*tp[0] + d3A[ 5]*tp[1] + d3A[ 6]*tp[2] + d3A[ 7]*tp[3])+
            SplineCoefs[i+2]*(d3A[ 8]*tp[0] + d3A[ 9]*tp[1] + d3A[10]*tp[2] + d3A[11]*tp[3])+
            SplineCoefs[i+3]*(d3A[12]*tp[0] + d3A[13]*tp[1] + d3A[14]*tp[2] + d3A[15]*tp[3]));
  d2udr2 = DeltaRInv * DeltaRInv *
           (SplineCoefs[i+0]*(d2A[ 0]*tp[0] + d2A[ 1]*tp[1] + d2A[ 2]*tp[2] + d2A[ 3]*tp[3])+
            SplineCoefs[i+1]*(d2A[ 4]*tp[0] + d2A[ 5]*tp[1] + d2A[ 6]*tp[2] + d2A[ 7]*tp[3])+
            SplineCoefs[i+2]*(d2A[ 8]*tp[0] + d2A[ 9]*tp[1] + d2A[10]*tp[2] + d2A[11]*tp[3])+
            SplineCoefs[i+3]*(d2A[12]*tp[0] + d2A[13]*tp[1] + d2A[14]*tp[2] + d2A[15]*tp[3]));
  dudr = DeltaRInv *
         (SplineCoefs[i+0]*(dA[ 0]*tp[0] + dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3])+
          SplineCoefs[i+1]*(dA[ 4]*tp[0] + dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3])+
          SplineCoefs[i+2]*(dA[ 8]*tp[0] + dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3])+
          SplineCoefs[i+3]*(dA[12]*tp[0] + dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]));
  // if (std::abs(dudr_FD-dudr) > 1.0e-8)
  //   std::cerr << "Error in BsplineFunction: dudr = " << dudr
  //             << " dudr_FD = " << dudr_FD << std::endl;
  // if (std::abs(d2udr2_FD-d2udr2) > 1.0e-4)
  //   std::cerr << "Error in BsplineFunction: r = " << r << " d2udr2 = " << dudr
  //             << " d2udr2_FD = " << d2udr2_FD << " rcut = " << cutoff_radius << std::endl;
  // if (std::abs(d3udr3_FD-d3udr3) > 1.0e-4)
  //   std::cerr << "Error in BsplineFunction: r = " << r << " d3udr3 = " << dudr
  //             << " d3udr3_FD = " << d3udr3_FD << " rcut = " << cutoff_radius << std::endl;
  return
    (SplineCoefs[i+0]*(A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3])+
     SplineCoefs[i+1]*(A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3])+
     SplineCoefs[i+2]*(A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3])+
     SplineCoefs[i+3]*(A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3]));
}
/** Accumulate d/dp_k of (u, du/dr, d2u/dr2) into derivs for each
 * optimizable parameter p_k.  Returns false (derivs untouched) beyond the
 * cutoff.  Parameters map to spline coefficients offset by one, so
 * derivs[n-1] receives SplineDerivs[n]; the final derivs[1] accumulation
 * folds in coefficient 0, which reset() ties to Parameters[1] through the
 * cusp condition.
 */
inline bool
evaluateDerivatives(real_type r, std::vector<TinyVector<real_type,3> >& derivs)
{
  if (r >= cutoff_radius)
    return false;
  // Split the scaled distance into knot index i and fractional offset t.
  r *= DeltaRInv;
  real_type ipart, t;
  t = std::modf(r, &ipart);
  int i = (int) ipart;
  real_type tp[4];
  tp[0] = t*t*t;
  tp[1] = t*t;
  tp[2] = t;
  tp[3] = 1.0;
  SplineDerivs[0] = TinyVector<real_type,3>(0.0);
  // d/dp_i u(r)
  SplineDerivs[i+0][0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
  SplineDerivs[i+1][0] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
  SplineDerivs[i+2][0] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
  SplineDerivs[i+3][0] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
  // d/dp_i du/dr
  SplineDerivs[i+0][1] = DeltaRInv * (dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3]);
  SplineDerivs[i+1][1] = DeltaRInv * (dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3]);
  SplineDerivs[i+2][1] = DeltaRInv * (dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3]);
  SplineDerivs[i+3][1] = DeltaRInv * (dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]);
  // d/dp_i d2u/dr2
  SplineDerivs[i+0][2] = DeltaRInv * DeltaRInv * (d2A[ 2]*tp[2] + d2A[ 3]*tp[3]);
  SplineDerivs[i+1][2] = DeltaRInv * DeltaRInv * (d2A[ 6]*tp[2] + d2A[ 7]*tp[3]);
  SplineDerivs[i+2][2] = DeltaRInv * DeltaRInv * (d2A[10]*tp[2] + d2A[11]*tp[3]);
  SplineDerivs[i+3][2] = DeltaRInv * DeltaRInv * (d2A[14]*tp[2] + d2A[15]*tp[3]);
  // Clamp the coefficient window [i, i+4) to the parameter range.
  int imin=std::max(i,1);
  int imax=std::min(i+4,NumParams+1);
  for (int n=imin; n<imax; ++n)
    derivs[n-1] = SplineDerivs[n];
  derivs[1]+=SplineDerivs[0];
  //real_type v[4],dv[4],d2v[4];
  //v[0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
  //v[1] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
  //v[2] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
  //v[3] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
  //// d/dp_i du/dr
  //dv[0] = DeltaRInv * (dA[ 1]*tp[1] + dA[ 2]*tp[2] + dA[ 3]*tp[3]);
  //dv[1] = DeltaRInv * (dA[ 5]*tp[1] + dA[ 6]*tp[2] + dA[ 7]*tp[3]);
  //dv[2] = DeltaRInv * (dA[ 9]*tp[1] + dA[10]*tp[2] + dA[11]*tp[3]);
  //dv[3] = DeltaRInv * (dA[13]*tp[1] + dA[14]*tp[2] + dA[15]*tp[3]);
  //// d/dp_i d2u/dr2
  //d2v[0] = DeltaRInv * DeltaRInv * (d2A[ 2]*tp[2] + d2A[ 3]*tp[3]);
  //d2v[1] = DeltaRInv * DeltaRInv * (d2A[ 6]*tp[2] + d2A[ 7]*tp[3]);
  //d2v[2] = DeltaRInv * DeltaRInv * (d2A[10]*tp[2] + d2A[11]*tp[3]);
  //d2v[3] = DeltaRInv * DeltaRInv * (d2A[14]*tp[2] + d2A[15]*tp[3]);
  //int imin=std::max(i,1);
  //int imax=std::min(i+4,NumParams+1)-1;
  //int n=imin-1, j=imin-i;
  //while(n<imax && j<4)
  //{
  //  derivs[n] = TinyVector<real_type,3>(v[j],dv[j],d2v[j]);
  //  n++; j++;
  //}
  //if(i==0) derivs[1]+= TinyVector<real_type,3>(v[0],dv[0],d2v[0]);
  return true;
}
/** Value-only variant: write d/dp_k u(r) into derivs for each optimizable
 * parameter p_k.  Returns false beyond the cutoff.  The window of the four
 * contributing coefficients is clamped to the parameter range, and the
 * i==0 case folds the cusp-tied coefficient 0 into derivs[1].
 */
inline bool evaluateDerivatives(real_type r, std::vector<real_type>& derivs)
{
  if (r >= cutoff_radius) return false;
  real_type tp[4],v[4],ipart,t;
  t = std::modf(r*DeltaRInv, &ipart);
  tp[0] = t*t*t;
  tp[1] = t*t;
  tp[2] = t;
  tp[3] = 1.0;
  v[0] = A[ 0]*tp[0] + A[ 1]*tp[1] + A[ 2]*tp[2] + A[ 3]*tp[3];
  v[1] = A[ 4]*tp[0] + A[ 5]*tp[1] + A[ 6]*tp[2] + A[ 7]*tp[3];
  v[2] = A[ 8]*tp[0] + A[ 9]*tp[1] + A[10]*tp[2] + A[11]*tp[3];
  v[3] = A[12]*tp[0] + A[13]*tp[1] + A[14]*tp[2] + A[15]*tp[3];
  int i = (int) ipart;
  int imin=std::max(i,1);
  int imax=std::min(i+4,NumParams+1)-1;
  int n=imin-1, j=imin-i;
  while(n<imax && j<4)
  {
    derivs[n] = v[j];
    n++; j++;
  }
  if(i==0) derivs[1]+= v[0];
  return true;
}
inline real_type f(real_type r)
{
  // OptimizableFunctorBase interface: plain value u(r).
  return (r >= cutoff_radius) ? 0.0 : evaluate(r);
}
inline real_type df(real_type r)
{
  // OptimizableFunctorBase interface: first derivative du/dr, zero at and
  // beyond the cutoff.
  if (r >= cutoff_radius)
    return 0.0;
  real_type first_deriv, second_deriv;
  evaluate(r, first_deriv, second_deriv);
  return first_deriv;
}
/** Configure the functor from an XML node: read size/cutoff attributes,
 * read (or refit) the coefficient array, and register optimizable
 * variables.  Returns true if any parameter is nonzero.
 */
bool put(xmlNodePtr cur)
{
  ReportEngine PRE("BsplineFunctor","put(xmlNodePtr)");
  //CuspValue = -1.0e10;
  NumParams = 0;
  //cutoff_radius = 0.0;
  OhmmsAttributeSet rAttrib;
  real_type radius = -1.0;
  rAttrib.add(NumParams, "size");
  rAttrib.add(radius, "rcut");
  rAttrib.add(radius, "cutoff");
  rAttrib.put(cur);
  // Cutoff policy (note the nested if/else: the second 'else' belongs to
  // the outer 'radius < 0.0' test):
  //  - unspecified + periodic: keep the preset cutoff_radius
  //    (assumes it was preset to the Wigner-Seitz radius — TODO confirm
  //    against the caller)
  //  - unspecified + open boundaries: fatal
  //  - specified but beyond the preset (periodic): clamp or abort
  //  - otherwise: accept the specified radius
  if (radius < 0.0)
    if (periodic)
      app_log() << " Jastrow cutoff unspecified. Setting to Wigner-Seitz radius = " << cutoff_radius << ".\n";
    else
    {
      APP_ABORT(" Jastrow cutoff unspecified. Cutoff must be given when using open boundary conditions");
    }
  else
    if (periodic && radius > cutoff_radius)
    {
      if (radius - cutoff_radius > 1e-4)
      {
        APP_ABORT( " The Jastrow cutoff specified should not be larger than Wigner-Seitz radius.");
      }
      else
      {
        app_log() << " The Jastrow cutoff specified is slightly larger than the Wigner-Seitz radius.";
        app_log() << " Setting to Wigner-Seitz radius = " << cutoff_radius << ".\n";
      }
    }
    else
      cutoff_radius = radius;
  if (NumParams == 0)
  {
    PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.",true);
  }
  app_log() << " size = " << NumParams << " parameters " << std::endl;
  app_log() << " cusp = " << CuspValue << std::endl;
  app_log() << " rcut = " << cutoff_radius << std::endl;
  resize(NumParams);
  // Now read coefficents
  xmlNodePtr xmlCoefs = cur->xmlChildrenNode;
  while (xmlCoefs != NULL)
  {
    std::string cname((const char*)xmlCoefs->name);
    if (cname == "coefficients")
    {
      std::string type("0"), id("0");
      std::string optimize("yes");
      OhmmsAttributeSet cAttrib;
      cAttrib.add(id, "id");
      cAttrib.add(type, "type");
      cAttrib.add(optimize, "optimize");
      cAttrib.put(xmlCoefs);
      if (type != "Array")
      {
        PRE.error("Unknown correlation type " + type + " in BsplineFunctor." + "Resetting to \"Array\"");
        xmlNewProp(xmlCoefs, (const xmlChar*) "type", (const xmlChar*) "Array");
      }
      std::vector<real_type> params;
      putContent(params, xmlCoefs);
      // NOTE(review): size_t vs int comparison; fine for the positive
      // NumParams enforced above.
      if (params.size() == NumParams)
        Parameters = params;
      else
      {
        // Parameter count changed: sample the old spline on a grid and
        // least-squares fit the new parameter set to it.
        app_log() << "Changing number of Bspline parameters from "
                  << params.size() << " to " << NumParams << ". Performing fit:\n";
        // Fit function to new number of parameters
        const int numPoints = 500;
        BsplineFunctor<T> tmp_func(CuspValue);
        tmp_func.cutoff_radius = cutoff_radius;
        tmp_func.resize(params.size());
        tmp_func.Parameters = params;
        tmp_func.reset();
        std::vector<real_type> y(numPoints);
        Matrix<real_type> basis(numPoints,NumParams);
        std::vector<TinyVector<real_type,3> > derivs(NumParams);
        for (int i=0; i<numPoints; i++)
        {
          real_type r = (real_type)i / (real_type)numPoints * cutoff_radius;
          y[i] = tmp_func.evaluate(r);
          evaluateDerivatives(r, derivs);
          for (int j=0; j<NumParams; j++)
            basis(i,j) = derivs[j][0];
        }
        resize(NumParams);
        LinearFit(y, basis, Parameters);
        app_log() << "New parameters are:\n";
        for (int i=0; i < Parameters.size(); i++)
          app_log() << " " << Parameters[i] << std::endl;
      }
      if(optimize == "yes")
      {
        notOpt=false;
      }
      else
      {
        notOpt=true;
      }
      // Register each parameter as an optimizable variable named "<id>_<i>".
      for (int i=0; i< NumParams; i++)
      {
        std::stringstream sstr;
        sstr << id << "_" << i;
        myVars.insert(sstr.str(),Parameters[i],!notOpt,optimize::LOGLINEAR_P);
      }
      app_log() << "Parameter Name Value\n";
      myVars.print(app_log());
    }
    xmlCoefs = xmlCoefs->next;
  }
  reset();
  real_type zeros=0;
  for (int i=0; i< NumParams; i++)
    zeros+=Parameters[i]*Parameters[i];
  return zeros>1.0e-12; //true if Parameters are not zero
}
// Build the functor directly from tabulated samples (x[i], y[i]) instead of
// XML input: allocate numPoints spline coefficients, least-squares fit them
// to the data, and (optionally) register them as optimizable variables.
// - numPoints: number of B-spline parameters (must be > 0, fatal otherwise)
// - x, y:      sample radii and target values; every x[i] must be <= rcut
// - cusp:      cusp condition imposed at r = 0
// - rcut:      cutoff radius of the functor
// - id:        prefix used to name the optimizable variables ("id_0", ...)
// - optimize:  "yes" to expose the parameters to the optimizer
void initialize(int numPoints, std::vector<real_type>& x, std::vector<real_type>& y
, real_type cusp, real_type rcut, std::string& id, std::string& optimize )
{
ReportEngine PRE("BsplineFunctor","initialize");
NumParams = numPoints;
cutoff_radius = rcut;
CuspValue = cusp;
if (NumParams == 0)
{
// 'true' makes this a fatal error
PRE.error("You must specify a positive number of parameters for the Bspline jastrow function.",true);
}
app_log() << "Initializing BsplineFunctor from array. \n";
app_log() << " size = " << NumParams << " parameters " << std::endl;
app_log() << " cusp = " << CuspValue << std::endl;
app_log() << " rcut = " << cutoff_radius << std::endl;
resize(NumParams);
int npts = x.size();
// basis(i,j) = d f(x_i) / d p_j; used for the linear least-squares fit below
Matrix<real_type> basis(npts,NumParams);
std::vector<TinyVector<real_type,3> > derivs(NumParams);
for (int i=0; i<npts; i++)
{
real_type r = x[i];
if (r > cutoff_radius)
{
PRE.error("Error in BsplineFunctor::initialize: r > cutoff_radius.",true);
}
evaluateDerivatives(r, derivs);
for (int j=0; j<NumParams; j++)
basis(i,j) = derivs[j][0];
}
resize(NumParams);
// Solve basis * Parameters ~= y in the least-squares sense
LinearFit(y, basis, Parameters);
app_log() << "New parameters are:\n";
for (int i=0; i < Parameters.size(); i++)
app_log() << " " << Parameters[i] << std::endl;
#if QMC_BUILD_LEVEL < 5
if(optimize == "yes")
{
// Setup parameter names
for (int i=0; i< NumParams; i++)
{
std::stringstream sstr;
sstr << id << "_" << i;
myVars.insert(sstr.str(),Parameters[i],true,optimize::LOGLINEAR_P);
}
app_log() << "Parameter Name Value\n";
myVars.print(app_log());
}
else
#endif
{
// Builds >= QMC_BUILD_LEVEL 5 always take this branch: parameters fixed
notOpt=true;
app_log() << "Parameters of BsplineFunctor id:"
<<id <<" are not being optimized.\n";
}
reset();
}
// Print the functor's optimizable variables; no-op when optimization is off.
void reportStatus(std::ostream& os)
{
  if (!notOpt)
    myVars.print(os);
}
// Resolve this functor's variable indices against the active variable set;
// no-op when optimization is off.
void checkOutVariables(const opt_variables_type& active)
{
  if (!notOpt)
    myVars.getIndex(active);
}
// Register this functor's variables into the active variable set;
// no-op when optimization is off.
void checkInVariables(opt_variables_type& active)
{
  if (!notOpt)
    active.insertFrom(myVars);
}
// Pull updated values for this functor's parameters out of the active
// optimizable-variable set and rebuild the spline coefficients.
void resetParameters(const opt_variables_type& active)
{
if (notOpt)
return;
for (int i=0; i<Parameters.size(); ++i)
{
// loc is the index of local variable i within 'active'; negative means
// this variable is not part of the active set and keeps its old value.
int loc=myVars.where(i);
if (loc>=0)
Parameters[i]=myVars[i]=active[loc];
}
// if (ResetCount++ == 100)
// {
// ResetCount = 0;
// if(ReportLevel) print();
// }
// Recompute SplineCoefs from the (possibly updated) Parameters.
reset();
}
// check if this object has active optimizable parameters
bool isOptimizable()
{
if (notOpt)
return false;
for (int i=0; i<Parameters.size(); ++i)
{
int loc=myVars.where(i);
if (loc>=0)
return true;
}
return false;
}
};
// Sum of the B-spline pair function u(r) over the distances
// distArray[iStart..iEnd), skipping the reference particle iat and any
// distance beyond cutoff_radius. Two passes: first compress the surviving
// distances into distArrayCompressed (gives the SIMD loop a dense,
// branch-free body), then evaluate the cubic B-spline via the A matrix.
template<typename T>
inline T
BsplineFunctor<T>::evaluateV(const int iat, const int iStart, const int iEnd,
const T* restrict _distArray, T* restrict distArrayCompressed ) const
{
const real_type* restrict distArray = _distArray + iStart;
ASSUME_ALIGNED(distArrayCompressed);
int iCount = 0;
const int iLimit = iEnd-iStart;
#pragma vector always
for ( int jat = 0; jat < iLimit; jat++ ) {
real_type r = distArray[jat];
// pick the distances smaller than the cutoff and avoid the reference atom
if ( r < cutoff_radius && iStart+jat != iat )
distArrayCompressed[iCount++] = distArray[jat];
}
real_type d = 0.0;
#pragma omp simd reduction (+:d)
for ( int jat = 0; jat < iCount; jat++ ) {
real_type r = distArrayCompressed[jat];
// map r to a spline interval: i = floor(r/Delta), t = fractional offset
r *= DeltaRInv;
int i = (int)r;
real_type t = r - real_type(i);
real_type tp0 = t*t*t;
real_type tp1 = t*t;
real_type tp2 = t;
// cubic B-spline: four coefficients blend via rows of the A matrix
real_type d1 = SplineCoefs[i+0]*(A[ 0]*tp0 + A[ 1]*tp1 + A[ 2]*tp2 + A[ 3]);
real_type d2 = SplineCoefs[i+1]*(A[ 4]*tp0 + A[ 5]*tp1 + A[ 6]*tp2 + A[ 7]);
real_type d3 = SplineCoefs[i+2]*(A[ 8]*tp0 + A[ 9]*tp1 + A[10]*tp2 + A[11]);
real_type d4 = SplineCoefs[i+3]*(A[12]*tp0 + A[13]*tp1 + A[14]*tp2 + A[15]);
d += ( d1 + d2 + d3 + d4 );
}
return d;
}
// Evaluate value, gradient magnitude factor, and Laplacian factor of the
// B-spline pair function for every distance in [iStart, iEnd), skipping the
// reference particle iat and distances beyond cutoff. Results are scattered
// back to the original slot of each surviving distance via distIndices.
// A/dA/d2A are the cubic B-spline basis matrix and its derivatives.
template<typename T>
inline void BsplineFunctor<T>::evaluateVGL(const int iat, const int iStart, const int iEnd,
const T* _distArray, T* restrict _valArray,
T* restrict _gradArray, T* restrict _laplArray,
T* restrict distArrayCompressed, int* restrict distIndices ) const
{
real_type dSquareDeltaRinv = DeltaRInv * DeltaRInv;
constexpr real_type cZero(0);
constexpr real_type cOne(1);
constexpr real_type cMOne(-1);
// START_MARK_FIRST();
ASSUME_ALIGNED(distIndices);
ASSUME_ALIGNED(distArrayCompressed);
int iCount = 0;
int iLimit = iEnd-iStart;
const real_type* distArray = _distArray + iStart;
real_type* valArray = _valArray + iStart;
real_type* gradArray = _gradArray + iStart;
real_type* laplArray = _laplArray + iStart;
// Pass 1: compress surviving distances (and remember their original slots)
// so the SIMD loop below is dense and branch-free.
#pragma vector always
for ( int jat = 0; jat < iLimit; jat++ ) {
real_type r = distArray[jat];
if ( r < cutoff_radius && iStart+jat != iat ) {
distIndices[iCount] = jat;
distArrayCompressed[iCount] = r;
iCount++;
}
}
// Pass 2: evaluate the spline and scatter results to the original indices.
#pragma omp simd
for ( int j = 0; j < iCount; j++ ) {
real_type r = distArrayCompressed[j];
int iScatter = distIndices[j];
real_type rinv = cOne/r;
// map r to spline interval iGather with fractional offset t
r *= DeltaRInv;
int iGather = (int)r;
real_type t = r - real_type(iGather);
real_type tp0 = t*t*t;
real_type tp1 = t*t;
real_type tp2 = t;
real_type sCoef0 = SplineCoefs[iGather+0];
real_type sCoef1 = SplineCoefs[iGather+1];
real_type sCoef2 = SplineCoefs[iGather+2];
real_type sCoef3 = SplineCoefs[iGather+3];
// second derivative (chain rule brings in DeltaRInv^2)
laplArray[iScatter] = dSquareDeltaRinv *
(sCoef0*( d2A[ 2]*tp2 + d2A[ 3])+
sCoef1*( d2A[ 6]*tp2 + d2A[ 7])+
sCoef2*( d2A[10]*tp2 + d2A[11])+
sCoef3*( d2A[14]*tp2 + d2A[15]));
// first derivative over r (du/dr * 1/r), chain rule brings in DeltaRInv
gradArray[iScatter] = DeltaRInv * rinv *
(sCoef0*( dA[ 1]*tp1 + dA[ 2]*tp2 + dA[ 3])+
sCoef1*( dA[ 5]*tp1 + dA[ 6]*tp2 + dA[ 7])+
sCoef2*( dA[ 9]*tp1 + dA[10]*tp2 + dA[11])+
sCoef3*( dA[13]*tp1 + dA[14]*tp2 + dA[15]));
valArray[iScatter] = (sCoef0*(A[ 0]*tp0 + A[ 1]*tp1 + A[ 2]*tp2 + A[ 3])+
sCoef1*(A[ 4]*tp0 + A[ 5]*tp1 + A[ 6]*tp2 + A[ 7])+
sCoef2*(A[ 8]*tp0 + A[ 9]*tp1 + A[10]*tp2 + A[11])+
sCoef3*(A[12]*tp0 + A[13]*tp1 + A[14]*tp2 + A[15]));
}
}
}
#endif
|
GB_unop__identity_int64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_int16)
// op(A') function: GB (_unop_tran__identity_int64_int16)
// C type: int64_t
// A type: int16_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int64_t) Ax: apply the identity operator with a typecast from
// int16 to int64, element-wise, in parallel. Cx and Ax may alias because
// each entry is read once and written once at the same index.
GrB_Info GB (_unop_apply__identity_int64_int16)
(
int64_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is sparse, hypersparse, or full: every entry of Ax is present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int64_t) A': transpose A while typecasting int16 to int64.
// The actual transpose kernel lives in GB_unop_transpose.c, which expands
// using the GB_* macros defined above for this type pair.
GrB_Info GB (_unop_tran__identity_int64_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
data.c | #include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
/* Read every line of `filename` into a linked list of strings.
 * Fatal error (via file_error) if the file cannot be opened. */
list *get_paths(char *filename)
{
    FILE *fp = fopen(filename, "r");
    if(fp == NULL) file_error(filename);
    list *lines = make_list();
    char *line;
    while((line = fgetl(fp)) != NULL){
        list_insert(lines, line);
    }
    fclose(fp);
    return lines;
}
/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
char **random_paths = calloc(n, sizeof(char*));
int i;
pthread_mutex_lock(&mutex);
for(i = 0; i < n; ++i){
int index = rand()%m;
indexes[i] = index;
random_paths[i] = paths[index];
if(i == 0) printf("%s\n", paths[index]);
}
pthread_mutex_unlock(&mutex);
return random_paths;
}
*/
/* Sample n paths (with replacement) from the m available ones.
 * The mutex serializes rand() so concurrent loader threads do not
 * interleave draws. Caller frees the returned array (not the strings). */
char **get_random_paths(char **paths, int n, int m)
{
    char **picked = calloc(n, sizeof(char*));
    pthread_mutex_lock(&mutex);
    int i;
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        picked[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return picked;
}
/* Return a new array of n paths with `find` substituted by `replace`
 * in each. Both the array and each string are heap-allocated. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **out = calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char buffer[4096];
        find_replace(paths[i], find, replace, buffer);
        out[i] = copy_string(buffer);
    }
    return out;
}
/* Load n images at w x h, convert each to grayscale, and pack the pixel
 * data as rows of a matrix (ownership of the pixel buffers transfers). */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = calloc(X.rows, sizeof(float*));
    int i;
    for(i = 0; i < n; ++i){
        image color = load_image(paths[i], w, h, 3);
        image gray = grayscale_image(color);
        free_image(color);
        X.vals[i] = gray.data;
        X.cols = gray.h*gray.w*gray.c;
    }
    return X;
}
/* Load n color images at w x h and pack their pixel buffers as matrix
 * rows (the matrix takes ownership of each image's data). */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    matrix X;
    X.rows = n;
    X.cols = 0;
    X.vals = calloc(X.rows, sizeof(float*));
    int i;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], w, h);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
/* Load n images and apply data augmentation to each: either a center crop
 * (center != 0) or a random rotate/scale/aspect crop, followed by a random
 * horizontal flip and random HSV distortion. Pixel buffers become rows of
 * the returned matrix. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
int i;
matrix X;
X.rows = n;
X.vals = calloc(X.rows, sizeof(float*));
X.cols = 0;
for(i = 0; i < n; ++i){
/* load at native resolution (0,0); the crop defines the final size */
image im = load_image_color(paths[i], 0, 0);
image crop;
if(center){
crop = center_crop_image(im, size, size);
} else {
crop = random_augment_image(im, angle, aspect, min, max, size, size);
}
int flip = rand()%2;
if (flip) flip_image(crop);
random_distort_image(crop, hue, saturation, exposure);
/*
show_image(im, "orig");
show_image(crop, "crop");
cvWaitKey(0);
*/
//grayscale_image_3c(crop);
free_image(im);
X.vals[i] = crop.data;
X.cols = crop.h*crop.w*crop.c;
}
return X;
}
box_label *read_boxes(char *filename, int *n)
{
FILE *file = fopen(filename, "r");
if(!file) file_error(filename);
float x, y, h, w;
int id;
int count = 0;
int size = 64;
box_label *boxes = calloc(size, sizeof(box_label));
while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
if(count == size) {
size = size * 2;
boxes = realloc(boxes, size*sizeof(box_label));
}
boxes[count].id = id;
boxes[count].x = x;
boxes[count].y = y;
boxes[count].h = h;
boxes[count].w = w;
boxes[count].left = x - w/2;
boxes[count].right = x + w/2;
boxes[count].top = y - h/2;
boxes[count].bottom = y + h/2;
++count;
}
fclose(file);
*n = count;
return boxes;
}
/* Shuffle the n boxes in place by swapping each element with a
 * uniformly random partner. */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int j = rand()%n;
        box_label tmp = b[i];
        b[i] = b[j];
        b[j] = tmp;
    }
}
/* Transform box coordinates to follow a crop/scale (sx, sy = scale,
 * dx, dy = translation) and an optional horizontal flip, then clamp all
 * boxes to [0,1] and rebuild (x,y,w,h) from the clamped edges. */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
int i;
for(i = 0; i < n; ++i){
/* (0,0) marks an empty/placeholder label: push it far outside the image
 * so it can never match an anchor */
if(boxes[i].x == 0 && boxes[i].y == 0) {
boxes[i].x = 999999;
boxes[i].y = 999999;
boxes[i].w = 999999;
boxes[i].h = 999999;
continue;
}
boxes[i].left = boxes[i].left * sx - dx;
boxes[i].right = boxes[i].right * sx - dx;
boxes[i].top = boxes[i].top * sy - dy;
boxes[i].bottom = boxes[i].bottom* sy - dy;
if(flip){
/* mirror horizontally: left/right swap around x = 0.5 */
float swap = boxes[i].left;
boxes[i].left = 1. - boxes[i].right;
boxes[i].right = 1. - swap;
}
boxes[i].left = constrain(0, 1, boxes[i].left);
boxes[i].right = constrain(0, 1, boxes[i].right);
boxes[i].top = constrain(0, 1, boxes[i].top);
boxes[i].bottom = constrain(0, 1, boxes[i].bottom);
/* recompute center/size from the clamped edges */
boxes[i].x = (boxes[i].left+boxes[i].right)/2;
boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
boxes[i].w = (boxes[i].right - boxes[i].left);
boxes[i].h = (boxes[i].bottom - boxes[i].top);
boxes[i].w = constrain(0, 1, boxes[i].w);
boxes[i].h = constrain(0, 1, boxes[i].h);
}
}
/* Fill the SWAG-style detection truth vector for one image: up to 90 boxes,
 * each occupying (4 + classes) floats laid out as [x, y, w, h, one-hot id].
 * The label file path is derived from the image path by substitution. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count && i < 90; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
/* NOTE(review): this guards against strictly-negative sizes only; after
 * correct_boxes() w/h are clamped to [0,1], so this never fires and
 * zero-size boxes pass through. Other fill_truth_* variants use a small
 * positive threshold (.001/.005) — confirm whether that was intended. */
if (w < .0 || h < .0) continue;
int index = (4+classes) * i;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
if (id < classes) truth[index+id] = 1;
}
free(boxes);
}
/* Fill region-layer (YOLOv1/v2-style grid) truth for one image: the image
 * is divided into num_boxes x num_boxes cells, each cell holding
 * [objectness, one-hot class, x, y, w, h] = (5 + classes) floats.
 * Only the first box falling into a cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
float x,y,w,h;
int id;
int i;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
/* discard degenerate boxes left over after cropping */
if (w < .005 || h < .005) continue;
/* grid cell containing the box center; x,y become cell-relative */
int col = (int)(x*num_boxes);
int row = (int)(y*num_boxes);
x = x*num_boxes - col;
y = y*num_boxes - row;
int index = (col+row*num_boxes)*(5+classes);
/* cell already claimed by an earlier (shuffled) box */
if (truth[index]) continue;
truth[index++] = 1;
if (id < classes) truth[index+id] = 1;
index += classes;
truth[index++] = x;
truth[index++] = y;
truth[index++] = w;
truth[index++] = h;
}
free(boxes);
}
/* Decode a run-length encoding into im.data: runs alternate between 0 and
 * 1 starting with 0; any pixels past the last run keep the final value. */
void load_rle(image im, int *rle, int n)
{
    int pos = 0;
    int value = 0;
    int i, j;
    for(i = 0; i < n; ++i){
        for(j = 0; j < rle[i]; ++j){
            im.data[pos++] = value;
        }
        value = 1 - value;
    }
    int total = im.h*im.w*im.c;
    while(pos < total){
        im.data[pos++] = value;
    }
}
/* Logical OR of a single-channel mask into channel c of dest. */
void or_image(image src, image dest, int c)
{
    int n = src.w*src.h;
    int i;
    for(i = 0; i < n; ++i){
        if(src.data[i] != 0) dest.data[dest.w*dest.h*c + i] = 1;
    }
}
/* Make the channel masks mutually exclusive: for every pixel, only the
 * lowest-indexed set channel survives; all higher channels are cleared. */
void exclusive_image(image src)
{
    int s = src.w*src.h;
    int i, k, j;
    for(i = 0; i < s; ++i){
        for(k = 0; k < src.c-1; ++k){
            if(!src.data[k*s + i]) continue;
            for(j = k+1; j < src.c; ++j){
                src.data[j*s + i] = 0;
            }
            break;
        }
    }
}
/* Bounding box (x, y, w, h in pixels) of the nonzero region of the first
 * channel of im. An all-zero image yields a degenerate box. */
box bound_image(image im)
{
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    int x, y;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(!im.data[y*im.w + x]) continue;
            if(x < minx) minx = x;
            if(x > maxx) maxx = x;
            if(y < miny) miny = y;
            if(y > maxy) maxy = y;
        }
    }
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}
/* Fill instance-segmentation truth for one image: each of up to num_boxes
 * instances contributes (mw*mh + 1) floats = [class id, flattened mw x mh
 * mask]. The mask file holds "id RLE" lines; each RLE is decoded, warped
 * with the same augmentation as the image, and downsampled to mw x mh.
 * A class id of -1 terminates the list when fewer instances exist. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
int j;
/* scratch image reused for each decoded instance mask */
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
/* apply the identical geometric augmentation used on the image */
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
image mask = resize_image(sized, mw, mh);
truth[i*(mw*mh+1)] = id;
for(j = 0; j < mw*mh; ++j){
truth[i*(mw*mh + 1) + 1 + j] = mask.data[j];
}
++i;
free_image(mask);
free_image(sized);
free(rle);
}
/* sentinel marking the end of the instance list */
if(i < num_boxes) truth[i*(mw*mh+1)] = -1;
fclose(file);
free_image(part);
}
/* Fill mask-RCNN-style truth for one image: each instance contributes
 * (4 + mw*mh + 1) floats = [cx, cy, w, h (normalized to the augmented
 * image), flattened mw x mh mask cropped to the box, class id].
 * Instances whose augmented mask is empty (b.w <= 0) are dropped. */
void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
int i = 0;
image part = make_image(w, h, 1);
while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
/* same geometric augmentation as the image */
image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
if(flip) flip_image(sized);
/* tight bounding box of the warped mask */
box b = bound_image(sized);
if(b.w > 0){
image crop = crop_image(sized, b.x, b.y, b.w, b.h);
image mask = resize_image(crop, mw, mh);
truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
int j;
for(j = 0; j < mw*mh; ++j){
truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
}
truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
free_image(crop);
free_image(mask);
++i;
}
free_image(sized);
free(rle);
}
fclose(file);
free_image(part);
}
/* Fill flat detection truth for one image: up to num_boxes entries of
 * 5 floats each = [x, y, w, h, class id], after applying the crop/flip
 * transform. Degenerate boxes are skipped and the remaining entries
 * compacted (via the `sub` offset). */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, "raw", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".png", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int count = 0;
box_label *boxes = read_boxes(labelpath, &count);
randomize_boxes(boxes, count);
correct_boxes(boxes, count, dx, dy, sx, sy, flip);
/* shuffling above makes the truncation below an unbiased sample */
if(count > num_boxes) count = num_boxes;
float x,y,w,h;
int id;
int i;
int sub = 0;
for (i = 0; i < count; ++i) {
x = boxes[i].x;
y = boxes[i].y;
w = boxes[i].w;
h = boxes[i].h;
id = boxes[i].id;
/* skip boxes the crop reduced to (near) nothing; compact the rest */
if ((w < .001 || h < .001)) {
++sub;
continue;
}
truth[(i-sub)*5+0] = x;
truth[(i-sub)*5+1] = y;
truth[(i-sub)*5+2] = w;
truth[(i-sub)*5+3] = h;
/* class id stored as a float in slot 4 */
truth[(i-sub)*5+4] = id;
}
free(boxes);
}
#define NUMCHARS 37
/* Print the argmax character of each of the n NUMCHARS-wide prediction
 * slices, followed by a newline. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int best = max_index(pred + i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(best));
    }
    printf("\n");
}
/* Fill one-hot captcha truth from the label encoded in the file name:
 * characters of the basename (up to the first '.') select classes; the
 * remaining slots up to n get the blank class NUMCHARS-1.
 * Fixes: the original did `++begin` on the result of strrchr, which is
 * undefined behavior when the path contains no '/'; and it re-evaluated
 * strlen(begin) on every loop iteration. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *slash = strrchr(path, '/');
    /* fall back to the whole path when there is no directory component */
    char *begin = slash ? slash + 1 : path;
    int len = strlen(begin);
    int i;
    for(i = 0; i < len && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}
/* Load a captcha batch: n images at w x h plus one-hot truth for k
 * characters per image. When m > 0 the paths are sampled randomly. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.y = make_matrix(n, k*NUMCHARS);
    d.X = load_image_paths(paths, n, w, h);
    int row;
    for(row = 0; row < n; ++row){
        fill_truth_captcha(paths[row], k, d.y.vals[row]);
    }
    if(m) free(paths);
    return d;
}
/* Load a captcha autoencoder batch: the target y aliases the input X
 * (d.y = d.X shares the same float buffers — note free_data must not
 * double-free; this relies on d.shallow handling elsewhere). */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
if(m) paths = get_random_paths(paths, n, m);
data d = {0};
d.shallow = 0;
d.X = load_image_paths(paths, n, w, h);
/* NOTE(review): 17100 overrides the natural w*h*3 column count — looks
 * like a hard-coded network input size; confirm against the caller. */
d.X.cols = 17100;
d.y = d.X;
if(m) free(paths);
return d;
}
/* One-hot classification truth: set truth[j] = 1 for every label that
 * occurs as a substring of path. Warns unless exactly one label matched
 * (or zero matched with a single-class problem). */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    memset(truth, 0, k*sizeof(float));
    int hits = 0;
    int j;
    for(j = 0; j < k; ++j){
        if(strstr(path, labels[j]) != NULL){
            truth[j] = 1;
            ++hits;
            //printf("%s %s %d\n", path, labels[j], j);
        }
    }
    if(hits != 1 && (k != 1 || hits != 0)) printf("Too many or too few labels: %d, %s\n", hits, path);
}
/* Propagate a one-hot label up a class hierarchy: every ancestor of a set
 * class is also set. Then, for each sibling group with no set member,
 * mark the whole group with SECRET_NUM (treated as "don't care" by the
 * loss elsewhere in darknet). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
int j;
for(j = 0; j < k; ++j){
if(truth[j]){
/* walk up to the root setting every ancestor */
int parent = hierarchy->parent[j];
while(parent >= 0){
truth[parent] = 1;
parent = hierarchy->parent[parent];
}
}
}
int i;
int count = 0;
for(j = 0; j < hierarchy->groups; ++j){
//printf("%d\n", count);
/* mask = 1 when no class in this sibling group is labeled */
int mask = 1;
for(i = 0; i < hierarchy->group_size[j]; ++i){
if(truth[count + i]){
mask = 0;
break;
}
}
if (mask) {
for(i = 0; i < hierarchy->group_size[j]; ++i){
truth[count + i] = SECRET_NUM;
}
}
count += hierarchy->group_size[j];
}
}
/* Load k regression targets per image from the sibling label file derived
 * from each image path (images/ -> labels/, extension -> .txt).
 * Fixes: the original never checked fopen, so a missing label file made
 * fscanf dereference NULL; it now fails loudly via file_error, matching
 * get_paths/read_boxes. The fscanf return is also checked so a short
 * file leaves the remaining (calloc'd) targets at zero instead of
 * reading indeterminate results. */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
        find_replace(labelpath, ".DKNET_BMP", ".txt", labelpath);
        find_replace(labelpath, ".JPEG", ".txt", labelpath);
        find_replace(labelpath, ".JPG", ".txt", labelpath);
        find_replace(labelpath, ".JPeG", ".txt", labelpath);
        find_replace(labelpath, ".Jpeg", ".txt", labelpath);
        find_replace(labelpath, ".PNG", ".txt", labelpath);
        find_replace(labelpath, ".TIF", ".txt", labelpath);
        find_replace(labelpath, ".bmp", ".txt", labelpath);
        find_replace(labelpath, ".jpeg", ".txt", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);
        find_replace(labelpath, ".tif", ".txt", labelpath);
        FILE *file = fopen(labelpath, "r");
        if(!file) file_error(labelpath);
        for(j = 0; j < k; ++j){
            if(fscanf(file, "%f", &(y.vals[i][j])) != 1) break;
        }
        fclose(file);
    }
    return y;
}
/* Build one-hot (optionally hierarchy-expanded) truth rows for n paths.
 * A NULL labels array yields an all-zero matrix. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    if(labels){
        int r;
        for(r = 0; r < n; ++r){
            fill_truth(paths[r], labels, k, y.vals[r]);
            if(hierarchy) fill_hierarchy(y.vals[r], k, hierarchy);
        }
    }
    return y;
}
/* Multi-label truth: read integer tag ids from each image's sibling
 * label file and set the corresponding columns to 1. Images with no
 * label file are silently left all-zero. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    //int count = 0;
    int i;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "images", "labels", label);
        find_replace(label, ".jpg", ".txt", label);
        FILE *fp = fopen(label, "r");
        if (fp == NULL) continue;
        //++count;
        int tag;
        while(fscanf(fp, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(fp);
    }
    //printf("%d/%d\n", count, n);
    return y;
}
/* Read label names (one per line) into a NULL-backed string array.
 * The list shell is freed; the strings themselves are kept. */
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **names = (char **)list_to_array(plist);
    free_list(plist);
    return names;
}
/* Release a data batch. Shallow batches only own the row-pointer arrays;
 * deep batches own the row buffers as well. */
void free_data(data d)
{
    if(d.shallow){
        free(d.X.vals);
        free(d.y.vals);
    } else {
        free_matrix(d.X);
        free_matrix(d.y);
    }
}
/* Build a w x h x classes segmentation mask for an image by OR-ing the
 * decoded RLE of every "id RLE" line in the sibling mask file into the
 * channel of its class id. Channels may overlap (no exclusivity). */
image get_segmentation_image(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes);
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
/* scratch single-channel image reused for each decoded RLE */
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
/* Like get_segmentation_image but with an extra trailing "background"
 * channel (index `classes`) that starts all-ones and is cleared wherever
 * any instance mask covers a pixel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
char labelpath[4096];
find_replace(path, "images", "mask", labelpath);
find_replace(labelpath, "JPEGImages", "mask", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPG", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
image mask = make_image(w, h, classes+1);
int i;
/* initialize the background channel to 1 everywhere */
for(i = 0; i < w*h; ++i){
mask.data[w*h*classes + i] = 1;
}
FILE *file = fopen(labelpath, "r");
if(!file) file_error(labelpath);
char buff[32788];
int id;
image part = make_image(w, h, 1);
while(fscanf(file, "%d %s", &id, buff) == 2){
int n = 0;
int *rle = read_intlist(buff, &n, 0);
load_rle(part, rle, n);
or_image(part, mask, id);
/* covered pixels are no longer background */
for(i = 0; i < w*h; ++i){
if(part.data[i]) mask.data[w*h*classes + i] = 0;
}
free(rle);
}
//exclusive_image(mask);
fclose(file);
free_image(part);
return mask;
}
/* Load a semantic-segmentation batch of n random images: X rows are the
 * augmented w x h x 3 pixels, y rows are the class masks warped with the
 * SAME augmentation but downscaled by `div`. Pixel ownership transfers
 * to the returned matrices. */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y.rows = n;
d.y.cols = h*w*classes/div/div;
d.y.vals = calloc(d.X.rows, sizeof(float*));
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
/* draw one set of augmentation parameters, applied to image AND mask */
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
/* color jitter applies only to the image, never the mask */
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
//image mask = make_image(orig.w, orig.h, classes+1);
/* identical geometric transform, scaled down by div for the target */
image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
if(flip) flip_image(sized_m);
d.y.vals[i] = sized_m.data;
free_image(orig);
free_image(mask);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
/* Load an instance-segmentation batch of n random images: X rows are the
 * augmented pixels, y rows are per-instance [id, mask] records filled by
 * fill_truth_iseg with the same augmentation parameters. */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
/* one (mask + id) record per candidate instance */
d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
/* truth masks get the identical geometric augmentation via `a` */
fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
/* Load a mask-prediction batch of n random images: X rows are augmented
 * pixels; y rows are per-instance [box, 14x14 mask, id] records produced
 * by fill_truth_mask with matching augmentation. */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
d.y = make_matrix(n, (coords+1)*boxes);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
int flip = rand()%2;
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
//show_image(sized, "image");
/* 14x14 is the fixed mask head resolution used by the caller */
fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);
free_image(orig);
/*
image rgb = mask_to_rgb(sized_m, classes);
show_image(rgb, "part");
show_image(sized, "orig");
cvWaitKey(0);
free_image(rgb);
*/
}
free(random_paths);
return d;
}
/* Load a region-layer training batch: n random images are jitter-cropped,
 * resized to w x h, randomly flipped and color-distorted; the grid truth
 * (size x size cells, 5+classes floats per cell) is built with the same
 * crop/flip transform by fill_truth_region. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
char **random_paths = get_random_paths(paths, n, m);
int i;
data d = {0};
d.shallow = 0;
d.X.rows = n;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
int k = size*size*(5+classes);
d.y = make_matrix(n, k);
for(i = 0; i < n; ++i){
image orig = load_image_color(random_paths[i], 0, 0);
int oh = orig.h;
int ow = orig.w;
/* random crop margins: each side moves by up to jitter * dimension */
int dw = (ow*jitter);
int dh = (oh*jitter);
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = ow - pleft - pright;
int sheight = oh - ptop - pbot;
/* scale and offset of the crop relative to the original, used to
 * remap the box labels below */
float sx = (float)swidth / ow;
float sy = (float)sheight / oh;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/ow)/sx;
float dy = ((float)ptop /oh)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
random_distort_image(sized, hue, saturation, exposure);
d.X.vals[i] = sized.data;
fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
}
free(random_paths);
return d;
}
/* Load a comparison batch: each of the n rows concatenates two images
 * (6 channels) and, per class, a pair of targets derived from the two
 * images' "id iou" label files: 1/0 or 0/1 when exactly one side clearly
 * wins (> .5 vs < .5), otherwise SECRET_NUM ("don't care") for both.
 * Fix: the original never checked either fopen, so a missing label file
 * made fscanf dereference NULL; now fails loudly via file_error like the
 * rest of this file. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;
    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);
        /* pack both images into one 6-channel row */
        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));
        int id;
        float iou;
        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");
        if(!fp1) file_error(imlabel1);
        /* keep the best iou per class for the first image */
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }
        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");
        if(!fp2) file_error(imlabel2);
        /* and per class for the second image */
        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }
        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                /* ambiguous pair: mask out of the loss */
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);
        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}
/* Load a single-image SWAG batch: one random image, jitter-cropped and
 * resized back to its own dimensions, with up to 90 (4+classes)-float
 * truth records built by fill_truth_swag using the same transform. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
int index = rand()%n;
char *random_path = paths[index];
image orig = load_image_color(random_path, 0, 0);
int h = orig.h;
int w = orig.w;
data d = {0};
d.shallow = 0;
d.w = w;
d.h = h;
d.X.rows = 1;
d.X.vals = calloc(d.X.rows, sizeof(float*));
d.X.cols = h*w*3;
/* 90 is the fixed maximum box count of the SWAG truth layout */
int k = (4+classes)*90;
d.y = make_matrix(1, k);
/* random crop margins, up to jitter * dimension per side */
int dw = w*jitter;
int dh = h*jitter;
int pleft = rand_uniform(-dw, dw);
int pright = rand_uniform(-dw, dw);
int ptop = rand_uniform(-dh, dh);
int pbot = rand_uniform(-dh, dh);
int swidth = w - pleft - pright;
int sheight = h - ptop - pbot;
/* crop scale/offset, used to remap the labels below */
float sx = (float)swidth / w;
float sy = (float)sheight / h;
int flip = rand()%2;
image cropped = crop_image(orig, pleft, ptop, swidth, sheight);
float dx = ((float)pleft/w)/sx;
float dy = ((float)ptop /h)/sy;
image sized = resize_image(cropped, w, h);
if(flip) flip_image(sized);
d.X.vals[0] = sized.data;
fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);
free_image(orig);
free_image(cropped);
return d;
}
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    // Loads n randomly chosen images with aspect-jitter / random placement /
    // HSV distortion / flip augmentation and fills detection truth
    // (5 values per box).
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;
    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;
    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        // Start from a mid-gray canvas and paste the rescaled image onto it.
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);
        // Randomly perturb the aspect ratio by up to +/- jitter per axis.
        float dw = jitter * orig.w;
        float dh = jitter * orig.h;
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;
        // Fit the jittered aspect ratio inside the w x h canvas.
        float nw, nh;
        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }
        // Random placement of the rescaled image on the canvas.
        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);
        place_image(orig, nw, nh, dx, dy, sized);
        random_distort_image(sized, hue, saturation, exposure);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        // d takes ownership of the canvas pixels.
        d.X.vals[i] = sized.data;
        // Truth boxes get the inverse of the placement transform.
        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);
        free_image(orig);
    }
    free(random_paths);
    return d;
}
void *load_thread(void *ptr)
{
    // Worker entry point: copy the heap-allocated args, normalize the
    // augmentation defaults (0 means "leave unchanged"), then dispatch on
    // the requested data type. The args copy passed in is freed here.
    load_args a = *(struct load_args*)ptr;
    free(ptr);
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    switch(a.type){
        case OLD_CLASSIFICATION_DATA:
            *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
            break;
        case REGRESSION_DATA:
            *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case CLASSIFICATION_DATA:
            *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
            break;
        case SUPER_DATA:
            *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
            break;
        case WRITING_DATA:
            *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
            break;
        case ISEG_DATA:
            *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case INSTANCE_DATA:
            *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        case SEGMENTATION_DATA:
            *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
            break;
        case REGION_DATA:
            *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case DETECTION_DATA:
            *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
            break;
        case SWAG_DATA:
            *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
            break;
        case COMPARE_DATA:
            *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h);
            break;
        case IMAGE_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = resize_image(*(a.im), a.w, a.h);
            break;
        case LETTERBOX_DATA:
            *(a.im) = load_image_color(a.path, 0, 0);
            *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
            break;
        case TAG_DATA:
            *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
            break;
        default:
            // Unknown type: match the original's silent no-op.
            break;
    }
    return 0;
}
pthread_t load_data_in_thread(load_args args)
{
    // Spawn a single loader thread; load_thread() takes ownership of
    // (and frees) the heap-allocated copy of args.
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t thread;
    if(pthread_create(&thread, 0, load_thread, copy)) error("Thread creation failed");
    return thread;
}
void *load_threads(void *ptr)
{
    // Coordinator thread: splits the requested n rows across args.threads
    // worker threads, waits for all of them, then concatenates their
    // results into *args.d. Frees the heap-allocated args copy.
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data *buffers = calloc(args.threads, sizeof(data));
    pthread_t *threads = calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        // Integer split that distributes any remainder across the workers.
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    // The concatenated result now owns the rows, so release the per-thread
    // buffers shallowly (pointer arrays only, not the row data).
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}
void load_data_blocking(load_args args)
{
    // Synchronous variant: run the loader in the calling thread.
    // load_thread() takes ownership of (and frees) the heap copy.
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    load_thread(copy);
}
pthread_t load_data(load_args args)
{
    // Spawn the coordinating thread, which fans out to args.threads
    // workers; the coordinator frees the heap-allocated args copy.
    struct load_args *copy = calloc(1, sizeof(struct load_args));
    *copy = args;
    pthread_t thread;
    if(pthread_create(&thread, 0, load_threads, copy)) error("Thread creation failed");
    return thread;
}
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    // Pairs each input image with its "-label.png" ground-truth image:
    // X holds the color inputs, y the grayscale targets.
    if(m) paths = get_random_paths(paths, n, m);
    char **label_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(label_paths, n, out_w, out_h);
    int idx;
    for(idx = 0; idx < n; ++idx) free(label_paths[idx]);
    free(label_paths);
    if(m) free(paths);
    return d;
}
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    // Classic classification loader: plain resized images plus labels
    // matched against the k label strings (no hierarchy, no augmentation).
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}
/*
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
data d = {0};
d.indexes = calloc(n, sizeof(int));
if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
d.shallow = 0;
d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
d.y = load_labels_paths(paths, n, labels, k);
if(m) free(paths);
return d;
}
*/
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    // Super-resolution pairs: y is a random high-resolution crop
    // (w*scale x h*scale) with optional flip, X is the same crop
    // downsampled to w x h.
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    int i;
    d.X.rows = n;
    d.X.vals = calloc(n, sizeof(float*));
    d.X.cols = w*h*3;
    d.y.rows = n;
    d.y.vals = calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;
    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        // d takes ownership of both pixel buffers; only im is freed here.
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }
    if(m) free(paths);
    return d;
}
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    // Augmented images paired with k regression targets parsed per path.
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_regression_labels_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
data select_data(data *orig, int *inds)
{
    // Builds a shallow dataset where row i is taken from orig[inds[i]].
    // All datasets in orig are assumed to share row/column shapes.
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;
    d.X.rows = orig[0].X.rows;
    // Fix: y.rows was copied from X.rows; take it from y itself so the
    // label matrix is sized correctly even if the shapes ever differ.
    d.y.rows = orig[0].y.rows;
    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;
    d.X.vals = calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}
data *tile_data(data orig, int divs, int size)
{
    // Splits every image in orig into a divs x divs grid of (possibly
    // overlapping) tiles; returns one data per tile position.
    data *ds = calloc(divs*divs, sizeof(data));
    int i;
    #pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        // Zero-initialize so fields not set below (boxes, indexes, ...)
        // are not garbage; the original left d uninitialized.
        data d = {0};
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = calloc(d.X.rows, sizeof(float*));
        d.y = copy_matrix(orig.y);
        // The loop index must be private to each outer iteration: the
        // original shared one `j` across the outer omp threads (a data
        // race). The nested `parallel for` over j was removed as well —
        // it raced on the shared j and, without OpenMP nesting enabled,
        // added only overhead.
        int j;
        for(j = 0; j < orig.X.rows; ++j){
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}
data resize_data(data orig, int w, int h)
{
    // Returns a deep copy of orig with every image resized to w x h;
    // labels are copied unchanged. Rows are processed in parallel.
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.y = copy_matrix(orig.y);
    #pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        // Wrap the raw row in an image header (no copy), then resize.
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    // Augmented classification batch; labels may be mapped through an
    // optional label-hierarchy tree.
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    // Augmented images paired with k-dimensional tag vectors per path.
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w = size;
    d.h = size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}
matrix concat_matrix(matrix m1, matrix m2)
{
    // Stacks the rows of m2 below the rows of m1. Row pointers are shared
    // with the inputs (shallow concatenation); only the pointer array is
    // freshly allocated. The column count is taken from m1.
    matrix out;
    out.cols = m1.cols;
    out.rows = m1.rows + m2.rows;
    out.vals = calloc(out.rows, sizeof(float*));
    int i;
    for(i = 0; i < m1.rows; ++i) out.vals[i] = m1.vals[i];
    for(i = 0; i < m2.rows; ++i) out.vals[m1.rows + i] = m2.vals[i];
    return out;
}
data concat_data(data d1, data d2)
{
    // Shallow row-wise concatenation of two datasets; row pointers are
    // shared with the inputs, so the result must be freed as shallow.
    data joined = {0};
    joined.shallow = 1;
    joined.w = d1.w;
    joined.h = d1.h;
    joined.X = concat_matrix(d1.X, d2.X);
    joined.y = concat_matrix(d1.y, d2.y);
    return joined;
}
data concat_datas(data *d, int n)
{
    // Folds n datasets into one by repeated pairwise concatenation.
    // Every intermediate is shallow (concat_data sets shallow = 1), so
    // free_data(out) only releases the previous pointer arrays, never
    // the underlying row data.
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data new = concat_data(d[i], out);
        free_data(out);
        out = new;
    }
    return out;
}
data load_categorical_data_csv(char *filename, int target, int k)
{
    // Reads a CSV file into X, pops column `target` out of it, and
    // one-hot encodes that column into a k-class label matrix y.
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;   // y adopts the encoded rows; only the 1-D copy is freed
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}
data load_cifar10_data(char *filename)
{
    // Loads one CIFAR-10 binary batch file (10000 records of
    // 1 label byte + 3072 pixel bytes) and scales pixels into [0,1].
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        // Stop early on a short read instead of consuming stack garbage;
        // the original ignored fread's return value.
        if(fread(bytes, 1, 3073, fp) != 3073) break;
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
    //normalize_data_rows(d);
    fclose(fp);
    return d;
}
void get_random_batch(data d, int n, float *X, float *y)
{
    // Copies n rows of d, sampled uniformly with replacement, into the
    // flat output buffers X and y.
    int b;
    for(b = 0; b < n; ++b){
        int row = rand()%d.X.rows;
        memcpy(X + b*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        memcpy(y + b*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
void get_next_batch(data d, int n, int offset, float *X, float *y)
{
    // Copies rows [offset, offset+n) of d into the flat buffers X and y.
    // y may be NULL when labels are not needed.
    int b;
    for(b = 0; b < n; ++b){
        int row = offset + b;
        memcpy(X + b*d.X.cols, d.X.vals[row], d.X.cols*sizeof(float));
        if(y) memcpy(y + b*d.y.cols, d.y.vals[row], d.y.cols*sizeof(float));
    }
}
void smooth_data(data d)
{
    // Label smoothing in place: y <- eps*(1/k) + (1-eps)*y, with
    // eps = 0.1 and k = number of label columns.
    float scale = 1. / d.y.cols;
    float eps = .1;
    int r, c;
    for(r = 0; r < d.y.rows; ++r){
        float *row = d.y.vals[r];
        for(c = 0; c < d.y.cols; ++c){
            row[c] = eps * scale + (1-eps) * row[c];
        }
    }
}
data load_all_cifar10()
{
    // Loads all five CIFAR-10 training batches (50000 records of
    // 1 label byte + 3072 pixel bytes), scales pixels to [0,1] and
    // applies label smoothing.
    data d = {0};
    d.shallow = 0;
    int i,j,b;
    matrix X = make_matrix(50000, 3072);
    matrix y = make_matrix(50000, 10);
    d.X = X;
    d.y = y;
    for(b = 0; b < 5; ++b){
        char buff[256];
        sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1);
        FILE *fp = fopen(buff, "rb");
        if(!fp) file_error(buff);
        for(i = 0; i < 10000; ++i){
            unsigned char bytes[3073];
            // Stop this batch early on a short read instead of consuming
            // stack garbage; the original ignored fread's return value.
            if(fread(bytes, 1, 3073, fp) != 3073) break;
            int class = bytes[0];
            y.vals[i+b*10000][class] = 1;
            for(j = 0; j < X.cols; ++j){
                X.vals[i+b*10000][j] = (double)bytes[j+1];
            }
        }
        fclose(fp);
    }
    //normalize_data_rows(d);
    scale_data_rows(d, 1./255);
    smooth_data(d);
    return d;
}
data load_go(char *filename)
{
    // Loads Go training data: each record is a "row col" move line
    // followed by a 361-character board line ('1' -> 1.0, '2' -> -1.0,
    // anything else -> 0.0). X holds boards, y one-hot move targets.
    // The matrices grow by doubling and are shrunk to fit at the end.
    FILE *fp = fopen(filename, "rb");
    // Check before the large allocations below (original allocated first).
    if(!fp) file_error(filename);
    matrix X = make_matrix(3363059, 361);
    matrix y = make_matrix(3363059, 361);
    int row, col;
    char *label;
    int count = 0;
    while((label = fgetl(fp))){
        int i;
        if(count == X.rows){
            X = resize_matrix(X, count*2);
            y = resize_matrix(y, count*2);
        }
        sscanf(label, "%d %d", &row, &col);
        char *board = fgetl(fp);
        if(!board){
            // Truncated file: a move line without its board line.
            // The original dereferenced the NULL board and crashed.
            free(label);
            break;
        }
        int index = row*19 + col;
        y.vals[count][index] = 1;
        for(i = 0; i < 19*19; ++i){
            float val = 0;
            if(board[i] == '1') val = 1;
            else if(board[i] == '2') val = -1;
            X.vals[count][i] = val;
        }
        ++count;
        free(label);
        free(board);
    }
    // Shrink to the number of records actually read.
    X = resize_matrix(X, count);
    y = resize_matrix(y, count);
    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}
void randomize_data(data d)
{
    // In-place Fisher-Yates shuffle of the rows of X and y together.
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        // Pick from [0, i] inclusive. The original used rand()%i, which
        // never leaves element i in place and biases the permutation.
        int index = rand()%(i+1);
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;
        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}
void scale_data_rows(data d, float s)
{
    // Multiplies every element of every input row of d.X by s, in place.
    int row;
    for(row = 0; row < d.X.rows; ++row){
        scale_array(d.X.vals[row], d.X.cols, s);
    }
}
void translate_data_rows(data d, float s)
{
    // Adds s to every element of every input row of d.X, in place.
    int row;
    for(row = 0; row < d.X.rows; ++row){
        translate_array(d.X.vals[row], d.X.cols, s);
    }
}
data copy_data(data d)
{
    // Deep copy of the X and y matrices. Note the boxes pointer is
    // shared with the source, not duplicated.
    data c = {0};
    c.shallow = 0;
    c.w = d.w;
    c.h = d.h;
    c.num_boxes = d.num_boxes;
    c.boxes = d.boxes;
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}
void normalize_data_rows(data d)
{
    // Normalizes each input row of d.X in place via normalize_array.
    int row;
    for(row = 0; row < d.X.rows; ++row){
        normalize_array(d.X.vals[row], d.X.cols);
    }
}
data get_data_part(data d, int part, int total)
{
    // Returns a shallow view of partition `part` out of `total` contiguous,
    // near-equal slices of d. No row data is copied; the vals pointers
    // simply point into d's arrays.
    data p = {0};
    p.shallow = 1;
    int x_begin = d.X.rows * part / total;
    int x_end = d.X.rows * (part + 1) / total;
    int y_begin = d.y.rows * part / total;
    int y_end = d.y.rows * (part + 1) / total;
    p.X.rows = x_end - x_begin;
    p.y.rows = y_end - y_begin;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + x_begin;
    p.y.vals = d.y.vals + y_begin;
    return p;
}
data get_random_data(data d, int num)
{
    // Shallow sample of num rows drawn uniformly with replacement;
    // the sampled row pointers are shared with d.
    data r = {0};
    r.shallow = 1;
    r.X.rows = num;
    r.y.rows = num;
    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;
    r.X.vals = calloc(num, sizeof(float *));
    r.y.vals = calloc(num, sizeof(float *));
    int s;
    for(s = 0; s < num; ++s){
        int pick = rand()%d.X.rows;
        r.X.vals[s] = d.X.vals[pick];
        r.y.vals[s] = d.y.vals[pick];
    }
    return r;
}
data *split_data(data d, int part, int total)
{
    // Shallow train/test split: rows [start, end) of partition `part`
    // out of `total` become the test set, the rest the train set.
    // Returns a malloc'd pair {train, test}; row pointers are shared
    // with d, so both halves must be freed as shallow.
    data *split = calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows - (end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;
    train.X.vals = calloc(train.X.rows, sizeof(float*));
    test.X.vals = calloc(test.X.rows, sizeof(float*));
    train.y.vals = calloc(train.y.rows, sizeof(float*));
    test.y.vals = calloc(test.y.rows, sizeof(float*));
    // Rows before the test window go to train.
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    // The test window itself.
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    // Rows after the test window, shifted down to close the gap.
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}
|
neutral.c | #include "neutral.h"
#include "../../comms.h"
#include "../../params.h"
#include "../../shared.h"
#include "../../shared_data.h"
#include "../neutral_interface.h"
#include <assert.h>
#include <float.h>
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef MPI
#include "mpi.h"
#endif
// Performs a solve of dependent variables for particle transport
void solve_transport_2d(
    const int nx, const int ny, const int global_nx, const int global_ny,
    const uint64_t master_key, const int pad, const int x_off, const int y_off,
    const double dt, const int ntotal_particles, int* nparticles,
    const int* neighbours, Particle* particles, const double* density,
    const double* edgex, const double* edgey, const double* edgedx,
    const double* edgedy, CrossSection* cs_scatter_table,
    CrossSection* cs_absorb_table, double* energy_deposition_tally,
    uint64_t* reduce_array0, uint64_t* reduce_array1, uint64_t* reduce_array2,
    uint64_t* facet_events, uint64_t* collision_events) {
  // Nothing to do if every particle has already been consumed.
  if (!(*nparticles)) {
    printf("Out of particles\n");
    return;
  }
  // Single batch covering all live particles; the literal 1 marks this as
  // the initial batch so census/MFP state is (re)initialized per particle.
  // NOTE(review): reduce_array0..2 are unused here — presumably consumed by
  // other variants of this solver; confirm against callers.
  handle_particles(global_nx, global_ny, nx, ny, master_key, pad, x_off, y_off,
                   1, dt, neighbours, density, edgex, edgey, edgedx, edgedy,
                   facet_events, collision_events, ntotal_particles,
                   *nparticles, particles, cs_scatter_table, cs_absorb_table,
                   energy_deposition_tally);
}
// Handles the current active batch of particles
void handle_particles(const int global_nx, const int global_ny, const int nx,
                      const int ny, const uint64_t master_key, const int pad,
                      const int x_off, const int y_off, const int initial,
                      const double dt, const int* neighbours,
                      const double* density, const double* edgex,
                      const double* edgey, const double* edgedx,
                      const double* edgedy, uint64_t* facets,
                      uint64_t* collisions, const int ntotal_particles,
                      const int nparticles_to_process,
                      Particle* particles_start, CrossSection* cs_scatter_table,
                      CrossSection* cs_absorb_table,
                      double* energy_deposition_tally) {
  // Queried but otherwise unused below.
  int nthreads = 0;
#pragma omp parallel
  { nthreads = omp_get_num_threads(); }
  // Per-batch event counters, reduced over the particle loop.
  uint64_t nfacets = 0;
  uint64_t ncollisions = 0;
  uint64_t nparticles = 0;
  // Pull the particle struct-of-arrays fields and the flattened
  // cross-section tables into plain local pointers so they can be
  // referenced directly inside the offloaded region.
  double* p_x = particles_start->x;
  double* p_y = particles_start->y;
  double* p_omega_x = particles_start->omega_x;
  double* p_omega_y = particles_start->omega_y;
  double* p_energy = particles_start->energy;
  double* p_weight = particles_start->weight;
  double* p_dt_to_census = particles_start->dt_to_census;
  double* p_mfp_to_collision = particles_start->mfp_to_collision;
  int* p_cellx = particles_start->cellx;
  int* p_celly = particles_start->celly;
  int* p_dead = particles_start->dead;
  double* cs_scatter_table_keys = cs_scatter_table->keys;
  double* cs_scatter_table_values = cs_scatter_table->values;
  int cs_scatter_table_nentries = cs_scatter_table->nentries;
  double* cs_absorb_table_keys = cs_absorb_table->keys;
  double* cs_absorb_table_values = cs_absorb_table->values;
  int cs_absorb_table_nentries = cs_absorb_table->nentries;
  //int nt = nparticles_to_process/128+1;
  // Offload the per-particle history loop; each particle is followed
  // independently until census or death, so iterations are independent.
#pragma omp target teams distribute parallel for simd \
    map(tofrom: nfacets, ncollisions, nparticles) \
    reduction(+: nfacets, ncollisions, nparticles)
  for (int pp = 0; pp < nparticles_to_process; ++pp) {
    // (1) particle can stream and reach census
    // (2) particle can collide and either
    //      - the particle will be absorbed
    //      - the particle will scatter (this means the energy changes)
    // (3) particle encounters boundary region, transports to another cell
    if (p_dead[pp]) {
      continue;
    }
    nparticles++;
    int x_facet = 0;
    int absorb_cs_index = -1;
    int scatter_cs_index = -1;
    double cell_mfp = 0.0;
    // Determine the current cell
    // Local mesh indices include the halo padding here.
    int cellx = p_cellx[pp] - x_off + pad;
    int celly = p_celly[pp] - y_off + pad;
    double local_density = density[celly * (nx + 2 * pad) + cellx];
    // Fetch the cross sections and prepare related quantities
    double microscopic_cs_scatter;
    microscopic_cs_for_energy(
        cs_scatter_table_keys, cs_scatter_table_values,
        cs_scatter_table_nentries, p_energy[pp], &scatter_cs_index, &microscopic_cs_scatter);
    double microscopic_cs_absorb;
    microscopic_cs_for_energy(
        cs_absorb_table_keys, cs_absorb_table_values, cs_absorb_table_nentries,
        p_energy[pp], &absorb_cs_index, &microscopic_cs_absorb);
    double number_density = (local_density * AVOGADROS / MOLAR_MASS);
    double macroscopic_cs_scatter =
        number_density * microscopic_cs_scatter * BARNS;
    double macroscopic_cs_absorb =
        number_density * microscopic_cs_absorb * BARNS;
    double speed = sqrt((2.0 * p_energy[pp] * eV_TO_J) / PARTICLE_MASS);
    double energy_deposition = 0.0;
    const double inv_ntotal_particles = 1.0 / (double)ntotal_particles;
    // Per-particle counter feeding the counter-based RNG.
    uint64_t counter = 0;
    double rn[NRANDOM_NUMBERS];
    // Set time to census and MFPs until collision, unless travelled
    // particle
    if (initial) {
      p_dt_to_census[pp] = dt;
      generate_random_numbers(pp, master_key, counter++, &rn[0], &rn[1]);
      p_mfp_to_collision[pp] = -log(rn[0]) / macroscopic_cs_scatter;
    }
    // Loop until we have reached census
    while (p_dt_to_census[pp] > 0.0) {
      cell_mfp = 1.0 / (macroscopic_cs_scatter + macroscopic_cs_absorb);
      // Work out the distance until the particle hits a facet
      double distance_to_facet = 0.0;
      calc_distance_to_facet(global_nx, p_x[pp], p_y[pp], pad, x_off, y_off,
                             p_omega_x[pp], p_omega_y[pp], speed, p_cellx[pp],
                             p_celly[pp], &distance_to_facet, &x_facet, edgex,
                             edgey);
      const double distance_to_collision = p_mfp_to_collision[pp] * cell_mfp;
      const double distance_to_census = speed * p_dt_to_census[pp];
      // The next event is whichever of collision / facet / census is
      // nearest along the flight path.
      // Check if our next event is a collision
      if (distance_to_collision < distance_to_facet &&
          distance_to_collision < distance_to_census) {
        // Track the total number of collisions
        ncollisions++;
        // Handles a collision event
        int result = collision_event(
            global_nx, nx, x_off, y_off, master_key, inv_ntotal_particles,
            distance_to_collision, local_density, cs_absorb_table_keys,
            cs_scatter_table_keys, cs_absorb_table_values,
            cs_scatter_table_values, cs_absorb_table_nentries,
            cs_scatter_table_nentries, pp, p_x, p_y, p_cellx, p_celly, p_weight,
            p_energy, p_dead, p_omega_x, p_omega_y, p_dt_to_census,
            p_mfp_to_collision, &counter, &energy_deposition, &number_density,
            &microscopic_cs_scatter, &microscopic_cs_absorb,
            &macroscopic_cs_scatter, &macroscopic_cs_absorb,
            energy_deposition_tally, &scatter_cs_index, &absorb_cs_index, rn,
            &speed);
        if (result != PARTICLE_CONTINUE) {
          break;
        }
      }
      // Check if we have reached facet
      else if (distance_to_facet < distance_to_census) {
        // Track the number of fact encounters
        nfacets++;
        // Handle facet event
        int result = facet_event(
            global_nx, global_ny, nx, ny, x_off, y_off, inv_ntotal_particles,
            distance_to_facet, speed, cell_mfp, x_facet, density, neighbours,
            pp, p_energy, p_weight, p_mfp_to_collision, p_dt_to_census, p_x,
            p_y, p_omega_x, p_omega_y, p_cellx, p_celly, &energy_deposition,
            &number_density, &microscopic_cs_scatter, &microscopic_cs_absorb,
            &macroscopic_cs_scatter, &macroscopic_cs_absorb,
            energy_deposition_tally, &cellx, &celly, &local_density);
        if (result != PARTICLE_CONTINUE) {
          break;
        }
      } else {
        census_event(global_nx, nx, x_off, y_off, inv_ntotal_particles,
                     distance_to_census, cell_mfp, pp, p_weight, p_energy, p_x,
                     p_y, p_omega_x, p_omega_y, p_mfp_to_collision,
                     p_dt_to_census, p_cellx, p_celly, &energy_deposition,
                     &number_density, &microscopic_cs_scatter,
                     &microscopic_cs_absorb, energy_deposition_tally);
        break;
      }
    }
  }
  // Store a total number of facets and collisions
  *facets += nfacets;
  *collisions += ncollisions;
  // NOTE(review): %llu assumes unsigned long long == uint64_t on this
  // platform; PRIu64 from <inttypes.h> would be portable.
  printf("Particles %llu\n", nparticles);
}
// Handles a collision event
// Returns PARTICLE_DEAD if the particle was absorbed below the energy
// cutoff, otherwise PARTICLE_CONTINUE. Updates the particle state and the
// caller-held cross-section/state scratch values through the pointers.
int collision_event(
    const int global_nx, const int nx, const int x_off, const int y_off,
    const uint64_t master_key, const double inv_ntotal_particles,
    const double distance_to_collision, const double local_density,
    const double* cs_absorb_table_keys, const double* cs_scatter_table_keys,
    const double* cs_absorb_table_values, const double* cs_scatter_table_values,
    const int cs_absorb_table_nentries, const int cs_scatter_table_nentries,
    const uint64_t pp, double* p_x, double* p_y, int* p_cellx, int* p_celly,
    double* p_weight, double* p_energy, int* p_dead, double* p_omega_x,
    double* p_omega_y, double* p_dt_to_census, double* p_mfp_to_collision,
    uint64_t* counter, double* energy_deposition, double* number_density,
    double* microscopic_cs_scatter, double* microscopic_cs_absorb,
    double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
    double* energy_deposition_tally, int* scatter_cs_index,
    int* absorb_cs_index, double rn[NRANDOM_NUMBERS], double* speed) {
  // Energy deposition stored locally for collision, not in tally mesh
  add_energy_deposition(
      global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
      inv_ntotal_particles, distance_to_collision, *number_density,
      *microscopic_cs_absorb, *microscopic_cs_scatter + *microscopic_cs_absorb, energy_deposition);
  // Moves the particle to the collision site
  p_x[pp] += distance_to_collision * p_omega_x[pp];
  p_y[pp] += distance_to_collision * p_omega_y[pp];
  // Probability that this collision is an absorption rather than a scatter.
  const double p_absorb = *macroscopic_cs_absorb /
                          (*macroscopic_cs_scatter + *macroscopic_cs_absorb);
  double rn0;
  double rn1;
  generate_random_numbers(pp, master_key, *counter, &rn0, &rn1);
  (*counter)++;
  if (rn0 < p_absorb) {
    /* Model particle absorption */
    // Find the new particle weight after absorption, saving the energy change
    p_weight[pp] *= (1.0 - p_absorb);
    if (p_energy[pp] < MIN_ENERGY_OF_INTEREST) {
      // Energy is too low, so mark the particle for deletion
      p_dead[pp] = 1;
      // Need to store tally information as finished with particle
      update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                     inv_ntotal_particles, *energy_deposition,
                     energy_deposition_tally);
      *energy_deposition = 0.0;
      return PARTICLE_DEAD;
    }
  } else {
    /* Model elastic particle scattering */
    // The following assumes that all particles reside within a two-dimensional
    // plane, which solves a different equation. Change so that we consider
    // the full set of directional cosines, allowing scattering between planes.
    // Choose a random scattering angle between -1 and 1
    const double mu_cm = 1.0 - 2.0 * rn1;
    // Calculate the new energy based on the relation to angle of incidence
    const double e_new = p_energy[pp] *
                         (MASS_NO * MASS_NO + 2.0 * MASS_NO * mu_cm + 1.0) /
                         ((MASS_NO + 1.0) * (MASS_NO + 1.0));
    // Convert the angle into the laboratory frame of reference
    double cos_theta = 0.5 * ((MASS_NO + 1.0) * sqrt(e_new / p_energy[pp]) -
                              (MASS_NO - 1.0) * sqrt(p_energy[pp] / e_new));
    // Alter the direction of the velocities
    const double sin_theta = sqrt(1.0 - cos_theta * cos_theta);
    // 2-D rotation of the direction cosines by theta.
    const double omega_x_new =
        (p_omega_x[pp] * cos_theta - p_omega_y[pp] * sin_theta);
    const double omega_y_new =
        (p_omega_x[pp] * sin_theta + p_omega_y[pp] * cos_theta);
    p_omega_x[pp] = omega_x_new;
    p_omega_y[pp] = omega_y_new;
    p_energy[pp] = e_new;
  }
  // Energy has changed so update the cross-sections
  microscopic_cs_for_energy(
      cs_scatter_table_keys, cs_scatter_table_values, cs_scatter_table_nentries,
      p_energy[pp], scatter_cs_index, microscopic_cs_scatter);
  microscopic_cs_for_energy(
      cs_absorb_table_keys, cs_absorb_table_values, cs_absorb_table_nentries,
      p_energy[pp], absorb_cs_index, microscopic_cs_absorb);
  *number_density = (local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * (*microscopic_cs_scatter) * BARNS;
  *macroscopic_cs_absorb = *number_density * (*microscopic_cs_absorb) * BARNS;
  // Re-sample number of mean free paths to collision
  generate_random_numbers(pp, master_key, *counter, &rn0, &rn1);
  (*counter)++;
  p_mfp_to_collision[pp] = -log(rn0) / *macroscopic_cs_scatter;
  // Advance the census clock by the flight time, then refresh the speed
  // for the post-collision energy.
  p_dt_to_census[pp] -= distance_to_collision / *speed;
  *speed = sqrt((2.0 * p_energy[pp] * eV_TO_J) / PARTICLE_MASS);
  return PARTICLE_CONTINUE;
}
// Handle facet event
// Streams the particle to the cell facet, tallies the energy deposited in
// the cell being left, then either reflects at a global mesh boundary or
// moves the particle into the neighbouring cell. Always returns
// PARTICLE_CONTINUE.
int
facet_event(const int global_nx, const int global_ny, const int nx,
            const int ny, const int x_off, const int y_off,
            const double inv_ntotal_particles, const double distance_to_facet,
            const double speed, const double cell_mfp, const int x_facet,
            const double* density, const int* neighbours, const uint64_t pp,
            double* p_energy, double* p_weight, double* p_mfp_to_collision,
            double* p_dt_to_census, double* p_x, double* p_y, double* p_omega_x,
            double* p_omega_y, int* p_cellx, int* p_celly,
            double* energy_deposition, double* number_density,
            double* microscopic_cs_scatter, double* microscopic_cs_absorb,
            double* macroscopic_cs_scatter, double* macroscopic_cs_absorb,
            double* energy_deposition_tally, int* cellx, int* celly,
            double* local_density) {
  // Update the mean free paths until collision
  p_mfp_to_collision[pp] -= (distance_to_facet / cell_mfp);
  p_dt_to_census[pp] -= (distance_to_facet / speed);
  add_energy_deposition(
      global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
      inv_ntotal_particles, distance_to_facet, *number_density,
      *microscopic_cs_absorb, *microscopic_cs_scatter + *microscopic_cs_absorb, energy_deposition);
  // Update tallies as we leave a cell
  update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                 inv_ntotal_particles, *energy_deposition,
                 energy_deposition_tally);
  *energy_deposition = 0.0;
  // Move the particle to the facet
  p_x[pp] += distance_to_facet * p_omega_x[pp];
  p_y[pp] += distance_to_facet * p_omega_y[pp];
  // x_facet selects whether a vertical (x) or horizontal (y) facet was hit.
  // At the global mesh edges the direction cosine is negated (specular
  // reflection); otherwise the particle steps into the adjacent cell.
  if (x_facet) {
    if (p_omega_x[pp] > 0.0) {
      // Reflect at the boundary
      if (p_cellx[pp] >= (global_nx - 1)) {
        p_omega_x[pp] = -(p_omega_x[pp]);
      } else {
        // Moving to right cell
        p_cellx[pp]++;
      }
    } else if (p_omega_x[pp] < 0.0) {
      if (p_cellx[pp] <= 0) {
        // Reflect at the boundary
        p_omega_x[pp] = -(p_omega_x[pp]);
      } else {
        // Moving to left cell
        p_cellx[pp]--;
      }
    }
  } else {
    if (p_omega_y[pp] > 0.0) {
      // Reflect at the boundary
      if (p_celly[pp] >= (global_ny - 1)) {
        p_omega_y[pp] = -(p_omega_y[pp]);
      } else {
        // Moving to north cell
        p_celly[pp]++;
      }
    } else if (p_omega_y[pp] < 0.0) {
      // Reflect at the boundary
      if (p_celly[pp] <= 0) {
        p_omega_y[pp] = -(p_omega_y[pp]);
      } else {
        // Moving to south cell
        p_celly[pp]--;
      }
    }
  }
  // Update the data based on new cell
  // NOTE(review): indexing here uses an unpadded nx stride and no pad
  // offset, unlike the padded density lookup in handle_particles — confirm
  // whether `density` is expected to be the unpadded field at this point.
  *cellx = p_cellx[pp] - x_off;
  *celly = p_celly[pp] - y_off;
  *local_density = density[*celly * nx + *cellx];
  *number_density = (*local_density * AVOGADROS / MOLAR_MASS);
  *macroscopic_cs_scatter = *number_density * *microscopic_cs_scatter * BARNS;
  *macroscopic_cs_absorb = *number_density * *microscopic_cs_absorb * BARNS;
  return PARTICLE_CONTINUE;
}
// Handles the census event
// Streams the particle for the remaining census distance, banks its
// energy deposition into the tally mesh and zeroes its census clock so
// the history loop terminates.
void
census_event(const int global_nx, const int nx, const int x_off,
             const int y_off, const double inv_ntotal_particles,
             const double distance_to_census, const double cell_mfp,
             const uint64_t pp, double* p_weight, double* p_energy, double* p_x,
             double* p_y, double* p_omega_x, double* p_omega_y,
             double* p_mfp_to_collision, double* p_dt_to_census, int* p_cellx,
             int* p_celly, double* energy_deposition, double* number_density,
             double* microscopic_cs_scatter, double* microscopic_cs_absorb,
             double* energy_deposition_tally) {
  // We have not changed cell or energy level at this stage
  p_x[pp] += distance_to_census * p_omega_x[pp];
  p_y[pp] += distance_to_census * p_omega_y[pp];
  // Consume the mean free paths spent streaming to census.
  p_mfp_to_collision[pp] -= (distance_to_census / cell_mfp);
  add_energy_deposition(
      global_nx, nx, x_off, y_off, p_energy[pp], p_weight[pp],
      inv_ntotal_particles, distance_to_census, *number_density,
      *microscopic_cs_absorb, *microscopic_cs_scatter + *microscopic_cs_absorb, energy_deposition);
  // Need to store tally information as finished with particle
  update_tallies(nx, x_off, y_off, p_cellx[pp], p_celly[pp],
                 inv_ntotal_particles, *energy_deposition,
                 energy_deposition_tally);
  p_dt_to_census[pp] = 0.0;
}
// Tallies the energy deposition in the cell
void update_tallies(const int nx, const int x_off,
                    const int y_off, const int p_cellx,
                    const int p_celly,
                    const double inv_ntotal_particles,
                    const double energy_deposition,
                    double* energy_deposition_tally) {
  // Convert the particle's global cell coordinates to local mesh indices.
  const int local_x = p_cellx - x_off;
  const int local_y = p_celly - y_off;
  // Normalise by the total particle count and accumulate atomically,
  // since many particles may deposit into the same cell concurrently.
  const double contribution = energy_deposition * inv_ntotal_particles;
#pragma omp atomic update
  energy_deposition_tally[local_y * nx + local_x] += contribution;
}
// Calculate the distance to the next facet
// Determines which facet of the current cell (an x facet or a y facet) the
// particle's trajectory hits first, and the distance along the trajectory to
// that facet. On return *x_facet is 1 when an x facet is hit first, else 0.
void
calc_distance_to_facet(const int global_nx, const double p_x, const double p_y,
                       const int pad, const int x_off, const int y_off,
                       const double p_omega_x, const double p_omega_y,
                       const double speed, const int particle_cellx,
                       const int particle_celly, double* distance_to_facet,
                       int* x_facet, const double* edgex, const double* edgey) {
  // Check the master_key required to move the particle along a single axis
  // If the velocity is positive then the top or right boundary will be hit
  const int cellx = particle_cellx - x_off + pad;
  const int celly = particle_celly - y_off + pad;
  // Inverse velocity components. NOTE(review): a zero omega component makes
  // this divide by zero and produce +/-inf; the comparison below then picks
  // the other axis — confirm that behaviour is relied upon.
  double u_x_inv = 1.0 / (p_omega_x * speed);
  double u_y_inv = 1.0 / (p_omega_y * speed);
  // The bound is open on the left and bottom so we have to correct for this
  // and required the movement to the facet to go slightly further than the edge
  // in the calculated values, using OPEN_BOUND_CORRECTION, which is the
  // smallest possible distance from the closed bound e.g. 1.0e-14.
  double dt_x = (p_omega_x >= 0.0)
                    ? ((edgex[cellx + 1]) - p_x) * u_x_inv
                    : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - p_x) * u_x_inv;
  double dt_y = (p_omega_y >= 0.0)
                    ? ((edgey[celly + 1]) - p_y) * u_y_inv
                    : ((edgey[celly] - OPEN_BOUND_CORRECTION) - p_y) * u_y_inv;
  // The smaller crossing time decides which facet is reached first
  *x_facet = (dt_x < dt_y) ? 1 : 0;
  // Calculated the projection to be
  // a = vector on first edge to be hit
  // u = velocity vector
  double mag_u0 = speed;
  if (*x_facet) {
    // We are centered on the origin, so the y component is 0 after travelling
    // along the x axis to the edge (ax, 0).(x, y)
    *distance_to_facet =
        (p_omega_x >= 0.0)
            ? ((edgex[cellx + 1]) - p_x) * mag_u0 * u_x_inv
            : ((edgex[cellx] - OPEN_BOUND_CORRECTION) - p_x) * mag_u0 * u_x_inv;
  } else {
    // We are centered on the origin, so the x component is 0 after travelling
    // along the y axis to the edge (0, ay).(x, y)
    *distance_to_facet =
        (p_omega_y >= 0.0)
            ? ((edgey[celly + 1]) - p_y) * mag_u0 * u_y_inv
            : ((edgey[celly] - OPEN_BOUND_CORRECTION) - p_y) * mag_u0 * u_y_inv;
  }
}
// Calculate the energy deposition in the cell
// Accumulates into *ed the heating contribution of a particle travelling
// path_length through the current cell, split between scattering and
// absorption interactions.
void add_energy_deposition(
    const int global_nx, const int nx, const int x_off, const int y_off,
    const double p_energy, const double p_weight,
    const double inv_ntotal_particles, const double path_length,
    const double number_density, const double microscopic_cs_absorb,
    const double microscopic_cs_total, double* ed) {
  // Fraction of the total cross section due to absorption
  const double absorb_frac = microscopic_cs_absorb / microscopic_cs_total;
  // Absorbed particles deposit all of their energy (mean exit energy is 0)
  const double mean_exit_energy_absorb = 0.0;
  const double absorb_heating = absorb_frac * mean_exit_energy_absorb;
  // Mean energy retained after an elastic scatter off a nucleus of mass
  // number MASS_NO
  const double mean_exit_energy_scatter =
      p_energy *
      ((MASS_NO * MASS_NO + MASS_NO + 1) / ((MASS_NO + 1) * (MASS_NO + 1)));
  const double scatter_heating = (1.0 - absorb_frac) * mean_exit_energy_scatter;
  // Net energy transferred to the cell per interaction
  const double heating_response =
      (p_energy - scatter_heating - absorb_heating);
  // Weight the heating by the expected number of interactions over the path
  *ed += p_weight * path_length * (microscopic_cs_total * BARNS) *
         heating_response * number_density;
}
// Fetch the cross section for a particular energy value
// Performs a stepped binary search over the ascending key table for the
// energy group bracketing p_energy, reports the group index through cs_index,
// and returns the linearly interpolated cross section through cs.
void microscopic_cs_for_energy(const double* keys,
                               const double* values,
                               const int nentries,
                               const double p_energy,
                               int* cs_index, double* cs) {
  // Use a simple binary search to find the energy group
  int ind = nentries / 2;
  int width = ind / 2;
  while (p_energy < keys[ind] || p_energy >= keys[ind + 1]) {
    ind += (p_energy < keys[ind]) ? -width : width;
    // To handle odd cases, allows one extra walk (step width never drops
    // below 1; equivalent to max(1, width / 2) without the file-local macro)
    width = (width / 2 > 1) ? width / 2 : 1;
  }
  // Report the energy group found so callers can cache it as a search hint.
  // BUG FIX: the original never assigned *cs_index, leaving it uninitialised.
  if (cs_index) {
    *cs_index = ind;
  }
  // Return the value linearly interpolated
  *cs = values[ind] +
        ((p_energy - keys[ind]) / (keys[ind + 1] - keys[ind])) *
            (values[ind + 1] - values[ind]);
}
// Generates a pair of uniform random numbers in (0, 1) using the
// counter-based Threefry-2x64 generator (Random123). A given
// (pkey, master_key, counter) triple always reproduces the same pair, which
// keeps particle histories deterministic and trivially parallelisable.
void generate_random_numbers(const uint64_t pkey,
                             const uint64_t master_key,
                             const uint64_t counter, double* rn0,
                             double* rn1) {
  // Removed the unused local 'nrns' present in the original.
  threefry2x64_ctr_t ctr;
  threefry2x64_ctr_t key;
  ctr.v[0] = counter;
  ctr.v[1] = 0;
  key.v[0] = pkey;
  key.v[1] = master_key;
  // Generate the random numbers
  threefry2x64_ctr_t rand = threefry2x64(ctr, key);
  // Turn our random numbers from integrals to double precision, mapping the
  // full uint64 range into the open interval (0, 1); the half-step offset
  // keeps exact 0.0 and 1.0 from ever being produced.
  uint64_t max_uint64 = UINT64_C(0xFFFFFFFFFFFFFFFF);
  const double factor = 1.0 / (max_uint64 + 1.0);
  const double half_factor = 0.5 * factor;
  *rn0 = rand.v[0] * factor + half_factor;
  *rn1 = rand.v[1] * factor + half_factor;
}
// Validates the results of the simulation
// Reduces the energy deposition tally locally and globally, then (on MASTER
// only) compares the global total against the expected value read from the
// parameter file and reports PASSED/FAILED.
void validate(const int nx, const int ny, const char* params_filename,
              const int rank, double* energy_deposition_tally) {
  // Reduce the entire energy deposition tally locally
  double local_energy_tally = 0.0;
#pragma omp target teams distribute parallel for map( \
    tofrom : local_energy_tally) reduction(+ : local_energy_tally)
  for (int ii = 0; ii < nx * ny; ++ii) {
    local_energy_tally += energy_deposition_tally[ii];
  }
  // Finalise the reduction globally
  double global_energy_tally = reduce_all_sum(local_energy_tally);
  if (rank != MASTER) {
    return;
  }
  printf("\nFinal global_energy_tally %.15e\n", global_energy_tally);
  int nresults = 0;
  char* keys = (char*)malloc(sizeof(char) * MAX_KEYS * (MAX_STR_LEN + 1));
  double* values = (double*)malloc(sizeof(double) * MAX_KEYS);
  // Guard against allocation failure before handing the buffers on
  if (!keys || !values) {
    printf("Warning. Test entry was not found, could NOT validate.\n");
    free(keys);
    free(values);
    return;
  }
  if (!get_key_value_parameter(params_filename, NEUTRAL_TESTS, keys, values,
                               &nresults)) {
    printf("Warning. Test entry was not found, could NOT validate.\n");
    // BUG FIX: the original leaked keys/values on this early return
    free(keys);
    free(values);
    return;
  }
  // Check the result is within tolerance
  printf("Expected %.12e, result was %.12e.\n", values[0], global_energy_tally);
  if (within_tolerance(values[0], global_energy_tally, VALIDATE_TOLERANCE)) {
    printf("PASSED validation.\n");
  } else {
    printf("FAILED validation.\n");
  }
  free(keys);
  free(values);
}
// Initialises a new particle ready for tracking
// Allocates the particle structure-of-arrays storage (with a factor of 1.5
// headroom over nparticles) and gives every particle a random position inside
// the local source region, an isotropic direction, a mono-energetic initial
// energy and a full timestep of census time. Returns the total accumulated
// from the allocate_* helpers (presumably bytes — confirm their contract).
size_t inject_particles(const int nparticles, const int global_nx,
                        const int local_nx, const int local_ny, const int pad,
                        const double local_particle_left_off,
                        const double local_particle_bottom_off,
                        const double local_particle_width,
                        const double local_particle_height, const int x_off,
                        const int y_off, const double dt, const double* edgex,
                        const double* edgey, const double initial_energy,
                        Particle** particles) {
  *particles = (Particle*)malloc(sizeof(Particle));
  if (!*particles) {
    TERMINATE("Could not allocate particle array.\n");
  }
  Particle* particle = *particles;
  // Allocate each per-particle attribute array; the 1.5 factor leaves
  // headroom beyond the initial particle count
  size_t allocation = 0;
  allocation += allocate_data(&particle->x, nparticles * 1.5);
  allocation += allocate_data(&particle->y, nparticles * 1.5);
  allocation += allocate_data(&particle->omega_x, nparticles * 1.5);
  allocation += allocate_data(&particle->omega_y, nparticles * 1.5);
  allocation += allocate_data(&particle->energy, nparticles * 1.5);
  allocation += allocate_data(&particle->weight, nparticles * 1.5);
  allocation += allocate_data(&particle->dt_to_census, nparticles * 1.5);
  allocation += allocate_data(&particle->mfp_to_collision, nparticles * 1.5);
  allocation += allocate_int_data(&particle->cellx, nparticles * 1.5);
  allocation += allocate_int_data(&particle->celly, nparticles * 1.5);
  allocation += allocate_int_data(&particle->dead, nparticles * 1.5);
  // Raw pointers so the target region below maps plain arrays
  double* p_x = particle->x;
  double* p_y = particle->y;
  double* p_omega_x = particle->omega_x;
  double* p_omega_y = particle->omega_y;
  double* p_energy = particle->energy;
  double* p_weight = particle->weight;
  double* p_dt_to_census = particle->dt_to_census;
  double* p_mfp_to_collision = particle->mfp_to_collision;
  int* p_cellx = particle->cellx;
  int* p_celly = particle->celly;
  int* p_dead = particle->dead;
  START_PROFILING(&compute_profile);
#pragma omp target teams distribute parallel for
  for (int pp = 0; pp < nparticles; ++pp) {
    double rn[NRANDOM_NUMBERS];
    // Counter-based RNG: particle index is the key, so each particle gets an
    // independent, reproducible stream
    generate_random_numbers(pp, 0, 0, &rn[0], &rn[1]);
    // Set the initial random location of the particle inside the source
    // region
    p_x[pp] = local_particle_left_off + rn[0] * local_particle_width;
    p_y[pp] = local_particle_bottom_off + rn[1] * local_particle_height;
    // Check the location of the specific cell that the particle sits within.
    // We have to check this explicitly because the mesh might be non-uniform.
    int cellx = 0;
    int celly = 0;
    for (int ii = 0; ii < local_nx; ++ii) {
      if (p_x[pp] >= edgex[ii + pad] && p_x[pp] < edgex[ii + pad + 1]) {
        cellx = x_off + ii;
        break;
      }
    }
    for (int ii = 0; ii < local_ny; ++ii) {
      if (p_y[pp] >= edgey[ii + pad] && p_y[pp] < edgey[ii + pad + 1]) {
        celly = y_off + ii;
        break;
      }
    }
    p_cellx[pp] = cellx;
    p_celly[pp] = celly;
    // Generating theta has uniform density, however 0.0 and 1.0 produce the
    // same
    // value which introduces very very very small bias...
    generate_random_numbers(pp, 0, 1, &rn[0], &rn[1]);
    const double theta = 2.0 * M_PI * rn[0];
    p_omega_x[pp] = cos(theta);
    p_omega_y[pp] = sin(theta);
    // This approximation sets mono-energetic initial state for source
    // particles
    p_energy[pp] = initial_energy;
    // Set a weight for the particle to track absorption
    p_weight[pp] = 1.0;
    p_dt_to_census[pp] = dt;
    p_mfp_to_collision[pp] = 0.0;
    p_dead[pp] = 0;
  }
  STOP_PROFILING(&compute_profile, "initialising particles");
  return allocation;
}
|
myFunc.h | // Rob Farber
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <math.h>
#include <malloc.h>
#include <omp.h>
#define MIC_DEV 0
#define ALLOC alloc_if(1) free_if(0)
#define FREE alloc_if(0) free_if(1)
#define REUSE alloc_if(0) free_if(0)
// Use a struct to pass and get data from the objective function
typedef struct userData {
  // Data information
  int nExamples;                                  // number of training examples
  __declspec(align(64)) float * restrict example; // training data, 64B aligned
  __declspec(align(64)) float * restrict param;   // model parameters, 64B aligned
  // information for multi-Devices
  int nDevices;          // number of offload devices in use
  int startOffset;       // first example column owned by this partition
  struct userData* dev;  // per-device partitions (offload builds only)
  // Timing information
  int isWarmup;          // nonzero during warmup: statistics are not recorded
  double timeObjFunc;    // total wall time spent in timed objFunc calls
  int countObjFunc;      // number of timed objFunc calls
  double timeDataLoad;   // wall time spent loading/partitioning the data
  double minTime, maxTime; // fastest/slowest single objFunc call
} userData_t;
// function to measure wall clock time
// Returns the OpenMP wall-clock timestamp in seconds; differences between
// two calls give elapsed wall time.
inline double getTime() { return(omp_get_wtime());}
#pragma offload_attribute (push, target (mic))
// helper macros to index into the example array
// (the data is stored transposed: one row per input/output feature, one
// column per example — see init() for the loader)
#define IN(i,nExamples,j) (i*nExamples+j)
#define OUT(i,nExamples,j) ((i+N_INPUT)*nExamples+j)
// Define the Sigmoid
// The activation function G is selected at compile time; G_ESTIMATE is the
// flop count charged to one evaluation of G in the performance estimates.
#ifdef USE_LINEAR
char *desc="generated_PCA_func LINEAR()";
inline float G(float x) { return( x ) ;}
#define G_ESTIMATE 0
#elif USE_TANH
char *desc="generated_func tanh()";
inline float G(float x) { return( tanhf(x) ) ;}
#define G_ESTIMATE 7 // estimate 7 flops for G
#elif LOGISTIC
char *desc="generated func logistic()";
inline float G(float x) { return( 1.f/(1.f+expf(-x)) ) ;}
#define G_ESTIMATE 7 // estimate flops for G
#else // Use Elliott function
char *desc="generated func Eliott activation: x/(1+fabsf(x))";
inline float G(float x) { return( x/(1.f+fabsf(x)) ) ;}
#define G_ESTIMATE 3 // estimate flops for G
#endif
// This file defines the function to be evaluated
#include "fcn.h"
// The offload objective function
// Computes the root of the sum of squared residuals of myFunc over all
// examples. With __INTEL_OFFLOAD defined the work is partitioned across the
// MIC devices and run asynchronously; otherwise it runs on the host under
// OpenMP.
double _objFunc(unsigned int n, const double * restrict x,
                double * restrict grad, void * restrict my_func_data)
{
  double err;
  userData_t *uData = (userData_t *) my_func_data;
  // convert from double to float for speed
  for(int i=0; i < N_PARAM; i++) uData->param[i]=x[i];
#ifdef __INTEL_OFFLOAD
  // **** Start of Offload section ******
  double partial[uData->nDevices];
  float * restrict param = uData->param;
  int loadFlag[uData->nDevices];
  // asynchronous transfer of parameters to all devices
  for(int device=0; device < uData->nDevices; device++) {
    loadFlag[device]=device;
#pragma offload_transfer target(mic:device) in(param:length(N_PARAM) REUSE) signal(loadFlag+device)
    {}
  }
  // perform the computations asynchronously; each device waits on its own
  // parameter transfer and signals completion through partial[device]
  for(int device=0; device < uData->nDevices; device++) {
    float * restrict deviceExample = uData->dev[device].example; // workaround for compiler bug
    int nDeviceExamples = uData->dev[device].nExamples; // workaround for compiler bug
#pragma offload target(mic:device) in(param:length(0) REUSE) in(deviceExample:length(0) REUSE) \
  out(err : into(partial[device])) signal(partial+device) wait(loadFlag+device)
    {
      err=0.; // initialize error here for offload
#pragma omp parallel for reduction(+ : err)
      for(int i=0; i < nDeviceExamples; i++) {
        float d=myFunc(i, param, deviceExample, nDeviceExamples, NULL);
        err += d*d;
      }
    }
  }
  // gather and sum the per-device partial sums of squares
  err=0.;
  for(int device=0; device < uData->nDevices; device++) {
#pragma offload target(mic:device) wait(partial+device)
    {}
    err += partial[device];
  }
  // **** End of Offload section ******
#else
  err=0.; // initialize error here in case offload selected
#pragma omp parallel for reduction(+ : err)
  for(int i=0; i < uData->nExamples; i++) {
    float d=myFunc(i, uData->param, uData->example, uData->nExamples, NULL);
    err += d*d;
  }
#endif
  return sqrt(err);
}
#pragma offload_attribute (pop)
// The optimization library callable objective function that gathers timing
// information. Thin wrapper around _objFunc that rejects gradient requests
// and records per-call min/max/total timing (skipped during warmup runs).
double objFunc(unsigned int n, const double * restrict x,
               double * restrict grad, void * restrict my_func_data)
{
  if(grad) {
    fprintf(stderr,"Gradient not implemented!\n");
    exit(1);
  }
  userData_t *stats = (userData_t *) my_func_data;
  const double start = getTime();
  const double err = _objFunc(n, x, grad, my_func_data);
  const double elapsed = getTime() - start;
  if(!stats->isWarmup) {
    // Note a maxTime of zero means this is the first call
    if(stats->maxTime == 0.) {
      stats->maxTime = stats->minTime = elapsed;
    }
    if(elapsed > stats->maxTime) stats->maxTime = elapsed;
    if(elapsed < stats->minTime) stats->minTime = elapsed;
    stats->timeObjFunc += elapsed;
    stats->countObjFunc++;
  }
  return err;
}
// Called to free memory and report timing information
// Prints the thread count and timing statistics gathered by objFunc, then
// releases the device-side and host-side example/parameter buffers.
void fini(userData_t *uData)
{
  int nThreads=0;
  // Intel recommended way to get the number of threads in offload mode.
#pragma offload target(mic:MIC_DEV) out(nThreads)
  {
#pragma omp parallel
    {
#pragma omp single
      {
        nThreads = omp_get_num_threads();
      }
    }
  }
  // Output some information
  if(!uData->isWarmup) {
    printf("number OMP threads %d\n", nThreads);
    printf("DataLoadTime %g\n", uData->timeDataLoad);
    printf("AveObjTime %g, countObjFunc %d, totalObjTime %g\n",
           uData->timeObjFunc/uData->countObjFunc, uData->countObjFunc, uData->timeObjFunc);
#ifdef FLOP_ESTIMATE
    printf("Estimated flops in myFunc %d, estimated average GFlop/s %g\n", FLOP_ESTIMATE,
           (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->timeObjFunc/uData->countObjFunc)/1.e9) );
    printf("Estimated maximum GFlop/s %g, minimum GFLop/s %g\n",
           (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->minTime)/1.e9),
           (((double)uData->nExamples*FLOP_ESTIMATE)/(uData->maxTime)/1.e9) );
#endif
  }
  // BUG FIX: the closing brace of the if-block above used to sit before the
  // #endif, so builds without FLOP_ESTIMATE failed to compile.
#ifdef __INTEL_OFFLOAD
  // free the per-device copies of the examples and parameters
  if(uData->dev) {
    for(int i=0; i < uData->nDevices; i++) {
      if(uData->dev[i].example) {
        float *pt=uData->dev[i].example;
#pragma offload target(mic:i) in(pt : length(0) FREE)
        {}
        free(uData->dev[i].example);
      }
      if(uData->dev[i].param) {
        float *pt=uData->dev[i].param;
#pragma offload target(mic:i) in(pt : length(0) FREE)
        {}
      }
    }
    // BUG FIX: the original never freed the device partition array
    free(uData->dev);
    uData->dev=NULL;
  }
#endif
  // free on the host
  // BUG FIX: the original set uData->param to NULL in offload builds without
  // freeing it, leaking the host parameter buffer; it also declared an
  // unused aligned 'example' local.
  if(uData->example) free(uData->example);
  uData->example=NULL;
  if(uData->param) free(uData->param);
  uData->param=NULL;
}
// partitions data and asynchronously loads onto the devices
// Splits the example set into contiguous per-device column blocks, copies
// each block into its own 64-byte-aligned buffer, and starts asynchronous
// transfers of examples and parameters to every device. Records the total
// data-load wall time in uData->timeDataLoad.
void offloadData(userData_t *uData)
{
#ifdef __INTEL_OFFLOAD
  double startTime=getTime();
  uData->nDevices = _Offload_number_of_devices();
  if(uData->nDevices == 0) {
    fprintf(stderr,"No devices found!\n");
    // BUG FIX: was 'exit -1;', a no-op pointer expression, not a call
    exit(-1);
  }
  fprintf(stderr,"Number of devices %d\n",uData->nDevices);
  uData->dev = calloc(uData->nDevices, sizeof(userData_t));
  if(!uData->dev) {
    fprintf(stderr,"Out of memory!\n");
    // BUG FIX: was 'exit -1;'
    exit(-1);
  }
  // Partition examples across multiple devices
  for(int i=0; i < uData->nDevices; i++)
    uData->dev[i].nExamples = (uData->nExamples/uData->nDevices);
  for(int i=0; i < (uData->nExamples % uData->nDevices); i++)
    uData->dev[i].nExamples++; // fill in non-multiples of nDevices
  // fill in remaining uData information
  int index=0;
  for(int i=0; i < uData->nDevices; i++) {
    uData->dev[i].startOffset = index;
    index += uData->dev[i].nExamples;
    uData->dev[i].param = uData->param;
    fprintf(stderr,"Device %d startOffset %d nExamples %d\n", i,uData->dev[i].startOffset, uData->dev[i].nExamples);
  }
  // asynchronous alloc and transfer
  // prepare to move blocks of data to each device. (This is faster than a sliced move)
  int loadFlag[uData->nDevices];
  for(int i=0; i < uData->nDevices; i++) {
    int Xsiz = (N_INPUT+N_OUTPUT)*EXAMPLE_SIZE* uData->dev[i].nExamples;
    // For convenience, use c99 to cast pointer to a multidimensional array
    float (*data)[uData->nExamples] = (float (*)[uData->nExamples]) uData->example;
    uData->dev[i].example=(float*) memalign(64,Xsiz*sizeof(float));
    if(!uData->dev[i].example) {
      fprintf(stderr,"Not enough memory for the device copies of the examples!\n");
      exit(1);
    }
    // Gather this device's contiguous column block into its private buffer
    float *pt = uData->dev[i].example;
    for(int row=0; row < (N_INPUT+N_OUTPUT); row++) {
      int nCol = uData->dev[i].startOffset + uData->dev[i].nExamples;
      for(int col=uData->dev[i].startOffset; col < nCol; col++) {
        *(pt++) = data[row][col];
      }
    }
    pt = uData->dev[i].example; // workaround for compiler bug
    float *param = uData->param; // workaround for compiler bug
#pragma offload_transfer target(mic:i) in(pt : length(Xsiz) ALLOC) in(param: length(N_PARAM) ALLOC) signal(loadFlag+i)
    {}
  }
  // wait for all async transfers to finish
  for(int i=0; i < uData->nDevices; i++) {
#pragma offload target(mic:i) wait(loadFlag+i)
    {}
  }
  uData->timeDataLoad = getTime() - startTime;
#endif
}
// loads the binary file of the form:
// nInput, nOutput, nExamples
// Input [0] [0:nExamples]
// Input [1] [0:nExamples]
// ...
// Output [0] [0:nExamples]
// Output [1] [0:nExamples]
// ...
// Reads from stdin when filename is "-". Validates the header against the
// compile-time N_INPUT/N_OUTPUT, allocates aligned host storage for examples
// and parameters, reads the example data, and (in offload builds) pushes it
// to the devices via offloadData().
void init(char*filename, userData_t *uData)
{
  FILE *fn=stdin;
  // check if reading from stdin
  if(strcmp("-", filename) != 0)
    fn=fopen(filename,"r");
  if(!fn) {
    fprintf(stderr,"Cannot open %s\n",filename);
    exit(1);
  }
  // read the header information
  // NOTE(review): fread return values are unchecked here and below — a
  // truncated input file would go undetected.
  int32_t nInput, nOutput;
  int32_t nExamples;
  fread(&nInput,sizeof(int32_t), 1, fn);
  if(nInput != N_INPUT) {
    fprintf(stderr,"Number of inputs incorrect!\n");
    exit(1);
  }
  fread(&nOutput,sizeof(int32_t), 1, fn);
  if(nOutput != N_OUTPUT) {
    fprintf(stderr,"Number of outputs incorrect!\n");
    exit(1);
  }
  fread(&nExamples,sizeof(int32_t), 1, fn);
  if(nExamples <= 0) {
    fprintf(stderr,"Number of examples incorrect!\n");
    exit(1);
  }
  uData->nExamples = nExamples;
  // aligned allocation of the data
  uData->example=(float*) memalign(64,nExamples*EXAMPLE_SIZE*sizeof(float));
  if(!uData->example) {
    fprintf(stderr,"Not enough memory for examples!\n");
    exit(1);
  }
  // aligned allocation of the on-device parameters
  uData->param=(float*) memalign(64,N_PARAM*sizeof(float));
  if(!uData->param) {
    fprintf(stderr,"Not enough memory for the parameters!\n");
    exit(1);
  }
  // read the data one float at a time into the transposed layout described
  // by the IN/OUT indexing macros
  for(int exIndex=0; exIndex < uData->nExamples; exIndex++) {
    for(int i=0; i < nInput; i++)
      fread(&uData->example[IN(i,uData->nExamples, exIndex)],1, sizeof(float), fn);
    for(int i=0; i < nOutput; i++)
      fread(&uData->example[OUT(i,uData->nExamples, exIndex)],1, sizeof(float), fn);
  }
#ifdef __INTEL_OFFLOAD
  offloadData(uData);
#endif
  if(fn!=stdin) fclose(fn);
}
|
OpenMPClause.h | //===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines OpenMP AST classes for clauses.
/// There are clauses for executable directives, clauses for declarative
/// directives and clauses which can be used in both kinds of directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H
#define LLVM_CLANG_AST_OPENMPCLAUSE_H
#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <iterator>
#include <utility>
namespace clang {
class ASTContext;
//===----------------------------------------------------------------------===//
// AST classes for clauses.
//===----------------------------------------------------------------------===//
/// This is a basic class for representing single OpenMP clause.
class OMPClause {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  /// Kind of the clause.
  OpenMPClauseKind Kind;
protected:
  OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc)
      : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {}
public:
  /// Returns the starting location of the clause.
  SourceLocation getBeginLoc() const { return StartLoc; }
  /// Returns the ending location of the clause.
  SourceLocation getEndLoc() const { return EndLoc; }
  /// Sets the starting location of the clause.
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// Sets the ending location of the clause.
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }
  /// Returns kind of OpenMP clause (private, shared, reduction, etc.).
  OpenMPClauseKind getClauseKind() const { return Kind; }
  /// Returns true for clauses generated implicitly by the compiler (those
  /// carry an invalid start location rather than one written in source).
  bool isImplicit() const { return StartLoc.isInvalid(); }
  using child_iterator = StmtIterator;
  using const_child_iterator = ConstStmtIterator;
  using child_range = llvm::iterator_range<child_iterator>;
  using const_child_range = llvm::iterator_range<const_child_iterator>;
  /// Iterates over the statements/expressions contained in the clause.
  child_range children();
  const_child_range children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  /// Get the iterator range for the expressions used in the clauses. Used
  /// expressions include only the children that must be evaluated at the
  /// runtime before entering the construct.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  /// LLVM-style RTTI root: every clause object is an OMPClause.
  static bool classof(const OMPClause *) { return true; }
};
/// Class that handles pre-initialization statement for some clauses, like
/// 'schedule', 'firstprivate' etc.
class OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Pre-initialization statement for the clause.
  Stmt *PreInit = nullptr;
  /// Region that captures the associated stmt.
  OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
protected:
  OMPClauseWithPreInit(const OMPClause *This) {
    assert(get(This) && "get is not tuned for pre-init.");
  }
  /// Set pre-initialization statement for the clause.
  void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = OMPD_unknown) {
    PreInit = S;
    CaptureRegion = ThisRegion;
  }
public:
  /// Get pre-initialization statement for the clause.
  const Stmt *getPreInitStmt() const { return PreInit; }
  /// Get pre-initialization statement for the clause.
  Stmt *getPreInitStmt() { return PreInit; }
  /// Get capture region for the stmt in the clause.
  OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; }
  /// Returns the pre-init mix-in for clause \a C, or nullptr when the clause
  /// kind does not carry a pre-initialization statement.
  static OMPClauseWithPreInit *get(OMPClause *C);
  static const OMPClauseWithPreInit *get(const OMPClause *C);
};
/// Class that handles post-update expression for some clauses, like
/// 'lastprivate', 'reduction' etc.
class OMPClauseWithPostUpdate : public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Post-update expression for the clause.
  Expr *PostUpdate = nullptr;
protected:
  OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) {
    assert(get(This) && "get is not tuned for post-update.");
  }
  /// Set post-update expression for the clause.
  void setPostUpdateExpr(Expr *S) { PostUpdate = S; }
public:
  /// Get post-update expression for the clause.
  const Expr *getPostUpdateExpr() const { return PostUpdate; }
  /// Get post-update expression for the clause.
  Expr *getPostUpdateExpr() { return PostUpdate; }
  /// Returns the post-update mix-in for clause \a C, or nullptr when the
  /// clause kind does not carry a post-update expression.
  static OMPClauseWithPostUpdate *get(OMPClause *C);
  static const OMPClauseWithPostUpdate *get(const OMPClause *C);
};
/// This structure contains most locations needed by an OMPVarListClause.
struct OMPVarListLocTy {
  /// Starting location of the clause (the clause keyword).
  SourceLocation StartLoc;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Ending location of the clause.
  SourceLocation EndLoc;
  OMPVarListLocTy() = default;
  OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {}
};
/// This represents clauses with the list of variables like 'private',
/// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the
/// '#pragma omp ...' directives.
template <class T> class OMPVarListClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Number of variables in the list.
  unsigned NumVars;
protected:
  /// Build a clause with \a N variables
  ///
  /// \param K Kind of the clause.
  /// \param StartLoc Starting location of the clause (the clause keyword).
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc,
                   SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N)
      : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {}
  /// Fetches list of variables associated with this clause.
  MutableArrayRef<Expr *> getVarRefs() {
    return MutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
  }
  /// Sets the list of variables for this clause.
  void setVarRefs(ArrayRef<Expr *> VL) {
    assert(VL.size() == NumVars &&
           "Number of variables is not the same as the preallocated buffer");
    std::copy(VL.begin(), VL.end(),
              static_cast<T *>(this)->template getTrailingObjects<Expr *>());
  }
public:
  using varlist_iterator = MutableArrayRef<Expr *>::iterator;
  using varlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using varlist_range = llvm::iterator_range<varlist_iterator>;
  using varlist_const_range = llvm::iterator_range<varlist_const_iterator>;
  /// Returns the number of variables in the clause.
  unsigned varlist_size() const { return NumVars; }
  /// Returns true when the clause holds no variables.
  bool varlist_empty() const { return NumVars == 0; }
  varlist_range varlists() {
    return varlist_range(varlist_begin(), varlist_end());
  }
  varlist_const_range varlists() const {
    return varlist_const_range(varlist_begin(), varlist_end());
  }
  varlist_iterator varlist_begin() { return getVarRefs().begin(); }
  varlist_iterator varlist_end() { return getVarRefs().end(); }
  varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); }
  varlist_const_iterator varlist_end() const { return getVarRefs().end(); }
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Fetches list of all variables in the clause.
  ArrayRef<const Expr *> getVarRefs() const {
    return llvm::makeArrayRef(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
        NumVars);
  }
};
/// This represents 'allocator' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp allocate(a) allocator(omp_default_mem_alloc)
/// \endcode
/// In this example directive '#pragma omp allocate' has simple 'allocator'
/// clause with the allocator 'omp_default_mem_alloc'.
class OMPAllocatorClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of '('.
  SourceLocation LParenLoc;
  /// Expression with the allocator.
  Stmt *Allocator = nullptr;
  /// Set allocator.
  void setAllocator(Expr *A) { Allocator = A; }
public:
  /// Build 'allocator' clause with the given allocator.
  ///
  /// \param A Allocator.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Allocator(A) {}
  /// Build an empty clause.
  OMPAllocatorClause()
      : OMPClause(OMPC_allocator, SourceLocation(), SourceLocation()) {}
  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Returns allocator.
  Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); }
  /// The allocator expression is the clause's only child.
  child_range children() { return child_range(&Allocator, &Allocator + 1); }
  const_child_range children() const {
    return const_child_range(&Allocator, &Allocator + 1);
  }
  /// No children need evaluation before entering the construct.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocator;
  }
};
/// This represents clause 'allocate' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// and clause 'allocate' for the variable 'a'.
class OMPAllocateClause final
    : public OMPVarListClause<OMPAllocateClause>,
      private llvm::TrailingObjects<OMPAllocateClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;
  /// Allocator specified in the clause, or 'nullptr' if the default one is
  /// used.
  Expr *Allocator = nullptr;
  /// Position of the ':' delimiter in the clause;
  SourceLocation ColonLoc;
  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    Expr *Allocator, SourceLocation ColonLoc,
                    SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, StartLoc, LParenLoc,
                                            EndLoc, N),
        Allocator(Allocator), ColonLoc(ColonLoc) {}
  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPAllocateClause(unsigned N)
      : OMPVarListClause<OMPAllocateClause>(OMPC_allocate, SourceLocation(),
                                            SourceLocation(), SourceLocation(),
                                            N) {}
  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }
  /// Sets the allocator expression.
  void setAllocator(Expr *A) { Allocator = A; }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param Allocator Allocator expression.
  /// \param ColonLoc Location of ':' delimiter.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                   SourceLocation LParenLoc, Expr *Allocator,
                                   SourceLocation ColonLoc,
                                   SourceLocation EndLoc, ArrayRef<Expr *> VL);
  /// Returns the allocator expression or nullptr, if no allocator is specified.
  Expr *getAllocator() const { return Allocator; }
  /// Returns the location of the ':' delimiter.
  SourceLocation getColonLoc() const { return ColonLoc; }
  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N);
  /// The children are the variable references in the trailing storage.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPAllocateClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  /// No children need evaluation before entering the construct.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_allocate;
  }
};
/// This represents 'if' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel if(parallel:a > 5)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'if' clause with
/// condition 'a > 5' and directive name modifier 'parallel'.
class OMPIfClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'if' clause.
  Stmt *Condition = nullptr;

  /// Location of ':' (if any).
  SourceLocation ColonLoc;

  /// Directive name modifier for the clause.
  OpenMPDirectiveKind NameModifier = OMPD_unknown;

  /// Name modifier location.
  SourceLocation NameModifierLoc;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

  /// Set directive name modifier for the clause.
  void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; }

  /// Set location of directive name modifier for the clause.
  void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; }

  /// Set location of ':'.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }

public:
  /// Build 'if' clause with condition \a Cond.
  ///
  /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause.
  /// \param Cond Condition of the clause.
  /// \param HelperCond Helper condition for the clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param NameModifierLoc Location of directive name modifier.
  /// \param ColonLoc [OpenMP 4.1] Location of ':'.
  /// \param EndLoc Ending location of the clause.
  OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond,
              OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
              SourceLocation LParenLoc, SourceLocation NameModifierLoc,
              SourceLocation ColonLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc),
        NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) {
    setPreInitStmt(HelperCond, CaptureRegion);
  }

  /// Build an empty clause.
  OMPIfClause()
      : OMPClause(OMPC_if, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the location of ':'.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  /// Return directive name modifier associated with the clause.
  OpenMPDirectiveKind getNameModifier() const { return NameModifier; }

  /// Return the location of directive name modifier.
  SourceLocation getNameModifierLoc() const { return NameModifierLoc; }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  // NOTE: unlike most clauses, used_children() is not a trivial empty range
  // here; it is declared without a body and defined out of line.
  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPIfClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_if;
  }
};
/// This represents 'final' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task final(a > 5)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'final'
/// clause with condition 'a > 5'.
class OMPFinalClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Condition of the 'final' clause.
  Stmt *Condition = nullptr;

  /// Set condition.
  void setCondition(Expr *Cond) { Condition = Cond; }

public:
  /// Build 'final' clause with condition \a Cond.
  ///
  /// \param Cond Condition of the clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPFinalClause(Expr *Cond, SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(OMPC_final, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Condition(Cond) {}

  /// Build an empty clause.
  OMPFinalClause()
      : OMPClause(OMPC_final, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns condition.
  Expr *getCondition() const { return cast_or_null<Expr>(Condition); }

  child_range children() { return child_range(&Condition, &Condition + 1); }

  const_child_range children() const {
    return const_child_range(&Condition, &Condition + 1);
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_final;
  }
};
/// This represents 'num_threads' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel num_threads(6)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'num_threads'
/// clause with number of threads '6'.
class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-threads expression of the 'num_threads' clause.
  Stmt *NumThreads = nullptr;

  /// Set the number-of-threads expression.
  void setNumThreads(Expr *NThreads) { NumThreads = NThreads; }

public:
  /// Build 'num_threads' clause with expression \a NumThreads.
  ///
  /// \param NumThreads Number of threads for the construct.
  /// \param HelperNumThreads Helper Number of threads for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads,
                      OpenMPDirectiveKind CaptureRegion,
                      SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation EndLoc)
      : OMPClause(OMPC_num_threads, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc),
        NumThreads(NumThreads) {
    setPreInitStmt(HelperNumThreads, CaptureRegion);
  }

  /// Build an empty clause.
  OMPNumThreadsClause()
      : OMPClause(OMPC_num_threads, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns number of threads.
  Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); }

  child_range children() { return child_range(&NumThreads, &NumThreads + 1); }

  const_child_range children() const {
    return const_child_range(&NumThreads, &NumThreads + 1);
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_threads;
  }
};
/// This represents 'safelen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd safelen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'safelen'
/// with single expression '4'.
/// If the safelen clause is used then no two iterations executed
/// concurrently with SIMD instructions can have a greater distance
/// in the logical iteration space than its value. The parameter of
/// the safelen clause must be a constant positive integer expression.
class OMPSafelenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Safe iteration space distance.
  Stmt *Safelen = nullptr;

  /// Set safelen.
  void setSafelen(Expr *Len) { Safelen = Len; }

public:
  /// Build 'safelen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Safelen(Len) {}

  /// Build an empty clause.
  explicit OMPSafelenClause()
      : OMPClause(OMPC_safelen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return safe iteration space distance.
  Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); }

  child_range children() { return child_range(&Safelen, &Safelen + 1); }

  const_child_range children() const {
    return const_child_range(&Safelen, &Safelen + 1);
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_safelen;
  }
};
/// This represents 'simdlen' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd simdlen(4)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'simdlen'
/// with single expression '4'.
/// If the 'simdlen' clause is used then it specifies the preferred number of
/// iterations to be executed concurrently. The parameter of the 'simdlen'
/// clause must be a constant positive integer expression.
class OMPSimdlenClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Preferred number of iterations to be executed concurrently.
  Stmt *Simdlen = nullptr;

  /// Set simdlen.
  void setSimdlen(Expr *Len) { Simdlen = Len; }

public:
  /// Build 'simdlen' clause.
  ///
  /// \param Len Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Simdlen(Len) {}

  /// Build an empty clause.
  explicit OMPSimdlenClause()
      : OMPClause(OMPC_simdlen, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the preferred number of concurrent iterations.
  Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); }

  child_range children() { return child_range(&Simdlen, &Simdlen + 1); }

  const_child_range children() const {
    return const_child_range(&Simdlen, &Simdlen + 1);
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_simdlen;
  }
};
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp simd collapse(3)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'collapse'
/// with single expression '3'.
/// The parameter must be a constant positive integer expression, it specifies
/// the number of nested loops that should be collapsed into a single iteration
/// space.
class OMPCollapseClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of for-loops.
  Stmt *NumForLoops = nullptr;

  /// Set the number of associated for-loops.
  void setNumForLoops(Expr *Num) { NumForLoops = Num; }

public:
  /// Build 'collapse' clause.
  ///
  /// \param Num Expression associated with this clause (a constant positive
  /// integer expression).
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPCollapseClause(Expr *Num, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumForLoops(Num) {}

  /// Build an empty clause.
  explicit OMPCollapseClause()
      : OMPClause(OMPC_collapse, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number of associated for-loops.
  Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }

  child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }

  const_child_range children() const {
    return const_child_range(&NumForLoops, &NumForLoops + 1);
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_collapse;
  }
};
/// This represents 'default' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp parallel default(shared)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'default'
/// clause with kind 'shared'.
class OMPDefaultClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'default' clause.
  OpenMPDefaultClauseKind Kind = OMPC_DEFAULT_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setDefaultKind(OpenMPDefaultClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'default' clause with argument \a A ('none' or 'shared').
  ///
  /// \param A Argument of the clause ('none' or 'shared').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDefaultClause(OpenMPDefaultClauseKind A, SourceLocation ALoc,
                   SourceLocation StartLoc, SourceLocation LParenLoc,
                   SourceLocation EndLoc)
      : OMPClause(OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPDefaultClause()
      : OMPClause(OMPC_default, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPDefaultClauseKind getDefaultKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; }

  /// The clause stores only an enum kind, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_default;
  }
};
/// This represents 'proc_bind' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp parallel proc_bind(master)
/// \endcode
/// In this example directive '#pragma omp parallel' has simple 'proc_bind'
/// clause with kind 'master'.
class OMPProcBindClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'proc_bind' clause.
  OpenMPProcBindClauseKind Kind = OMPC_PROC_BIND_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setProcBindKind(OpenMPProcBindClauseKind K) { Kind = K; }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'proc_bind' clause with argument \a A ('master', 'close' or
  /// 'spread').
  ///
  /// \param A Argument of the clause ('master', 'close' or 'spread').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPProcBindClause(OpenMPProcBindClauseKind A, SourceLocation ALoc,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPProcBindClause()
      : OMPClause(OMPC_proc_bind, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPProcBindClauseKind getProcBindKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; }

  /// The clause stores only an enum kind, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_proc_bind;
  }
};
/// This represents 'unified_address' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_address'
/// clause.
class OMPUnifiedAddressClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_address' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_address, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedAddressClause()
      : OMPClause(OMPC_unified_address, SourceLocation(), SourceLocation()) {}

  /// The clause takes no argument, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_address;
  }
};
/// This represents 'unified_shared_memory' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires unified_shared_memory
/// \endcode
/// In this example directive '#pragma omp requires' has 'unified_shared_memory'
/// clause.
class OMPUnifiedSharedMemoryClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'unified_shared_memory' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_unified_shared_memory, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPUnifiedSharedMemoryClause()
      : OMPClause(OMPC_unified_shared_memory, SourceLocation(),
                  SourceLocation()) {}

  /// The clause takes no argument, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_unified_shared_memory;
  }
};
/// This represents 'reverse_offload' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires reverse_offload
/// \endcode
/// In this example directive '#pragma omp requires' has 'reverse_offload'
/// clause.
class OMPReverseOffloadClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'reverse_offload' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_reverse_offload, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPReverseOffloadClause()
      : OMPClause(OMPC_reverse_offload, SourceLocation(), SourceLocation()) {}

  /// The clause takes no argument, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reverse_offload;
  }
};
/// This represents 'dynamic_allocators' clause in the '#pragma omp requires'
/// directive.
///
/// \code
/// #pragma omp requires dynamic_allocators
/// \endcode
/// In this example directive '#pragma omp requires' has 'dynamic_allocators'
/// clause.
class OMPDynamicAllocatorsClause final : public OMPClause {
public:
  friend class OMPClauseReader;

  /// Build 'dynamic_allocators' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_dynamic_allocators, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDynamicAllocatorsClause()
      : OMPClause(OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {
  }

  /// The clause takes no argument, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dynamic_allocators;
  }
};
/// This represents 'atomic_default_mem_order' clause in the '#pragma omp
/// requires' directive.
///
/// \code
/// #pragma omp requires atomic_default_mem_order(seq_cst)
/// \endcode
/// In this example directive '#pragma omp requires' has simple
/// atomic_default_mem_order' clause with kind 'seq_cst'.
class OMPAtomicDefaultMemOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('
  SourceLocation LParenLoc;

  /// A kind of the 'atomic_default_mem_order' clause.
  OpenMPAtomicDefaultMemOrderClauseKind Kind =
      OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Kind of clause.
  void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) {
    Kind = K;
  }

  /// Set clause kind location.
  ///
  /// \param KLoc Kind location.
  void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) {
    KindKwLoc = KLoc;
  }

public:
  /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst',
  /// 'acq_rel' or 'relaxed').
  ///
  /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A,
                                 SourceLocation ALoc, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc)
      : OMPClause(OMPC_atomic_default_mem_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPAtomicDefaultMemOrderClause()
      : OMPClause(OMPC_atomic_default_mem_order, SourceLocation(),
                  SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const {
    return Kind;
  }

  /// Returns location of clause kind.
  SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; }

  /// The clause stores only an enum kind, so it has no children.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_atomic_default_mem_order;
  }
};
/// This represents 'schedule' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp for' has 'schedule' clause with
/// arguments 'static' and '3'.
class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown;

  /// Modifiers for 'schedule' clause.
  enum {FIRST, SECOND, NUM_MODIFIERS};
  OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS];

  /// Locations of modifiers.
  SourceLocation ModifiersLoc[NUM_MODIFIERS];

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; }

  /// Set the first schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[FIRST] = M;
  }

  /// Set the second schedule modifier.
  ///
  /// \param M Schedule modifier.
  void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) {
    Modifiers[SECOND] = M;
  }

  /// Set location of the first schedule modifier.
  void setFirstScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[FIRST] = Loc;
  }

  /// Set location of the second schedule modifier.
  void setSecondScheduleModifierLoc(SourceLocation Loc) {
    ModifiersLoc[SECOND] = Loc;
  }

  /// Set the next unset schedule modifier: fills the first slot if it is
  /// still unknown, otherwise the second (which is asserted to be unset).
  /// NOTE: the misspelled name ("Modifer") is kept intentionally; renaming
  /// it would break existing callers.
  ///
  /// \param M Schedule modifier.
  void setScheduleModifer(OpenMPScheduleClauseModifier M) {
    if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown)
      Modifiers[FIRST] = M;
    else {
      assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown);
      Modifiers[SECOND] = M;
    }
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'schedule' clause with schedule kind \a Kind and chunk size
  /// expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Schedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  /// \param M1 The first modifier applied to 'schedule' clause.
  /// \param M1Loc Location of the first modifier.
  /// \param M2 The second modifier applied to 'schedule' clause.
  /// \param M2Loc Location of the second modifier.
  OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation KLoc, SourceLocation CommaLoc,
                    SourceLocation EndLoc, OpenMPScheduleClauseKind Kind,
                    Expr *ChunkSize, Stmt *HelperChunkSize,
                    OpenMPScheduleClauseModifier M1, SourceLocation M1Loc,
                    OpenMPScheduleClauseModifier M2, SourceLocation M2Loc)
      : OMPClause(OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc),
        ChunkSize(ChunkSize) {
    setPreInitStmt(HelperChunkSize);
    Modifiers[FIRST] = M1;
    Modifiers[SECOND] = M2;
    ModifiersLoc[FIRST] = M1Loc;
    ModifiersLoc[SECOND] = M2Loc;
  }

  /// Build an empty clause.
  explicit OMPScheduleClause()
      : OMPClause(OMPC_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {
    Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown;
    Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown;
  }

  /// Get kind of the clause.
  OpenMPScheduleClauseKind getScheduleKind() const { return Kind; }

  /// Get the first modifier of the clause.
  OpenMPScheduleClauseModifier getFirstScheduleModifier() const {
    return Modifiers[FIRST];
  }

  /// Get the second modifier of the clause.
  OpenMPScheduleClauseModifier getSecondScheduleModifier() const {
    return Modifiers[SECOND];
  }

  /// Get location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get kind location.
  SourceLocation getScheduleKindLoc() const { return KindLoc; }

  /// Get the first modifier location.
  SourceLocation getFirstScheduleModifierLoc() const {
    return ModifiersLoc[FIRST];
  }

  /// Get the second modifier location.
  SourceLocation getSecondScheduleModifierLoc() const {
    return ModifiersLoc[SECOND];
  }

  /// Get location of ','.
  SourceLocation getCommaLoc() const { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  /// The only child is the chunk-size expression (possibly null).
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  /// This clause reports no "used" children (empty range).
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_schedule;
  }
};
/// This represents 'ordered' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp for ordered (2)
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause final
: public OMPClause,
private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
friend class OMPClauseReader;
friend TrailingObjects;
/// Location of '('.
SourceLocation LParenLoc;
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Real number of loops.
// NOTE(review): the tail-allocated Expr* storage is sized from this count
// and accessed through setLoopNumIterations()/setLoopCounter() below;
// presumably it holds per-loop iteration counts and loop counters -- confirm
// the exact layout against the out-of-line implementation.
unsigned NumberOfLoops = 0;
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num), NumberOfLoops(NumLoops) {}
/// Build an empty clause.
explicit OMPOrderedClause(unsigned NumLoops)
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
NumberOfLoops(NumLoops) {}
/// Set the number of associated for-loops.
// Private: reachable only through the friend OMPClauseReader.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
public:
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Build an empty clause.
static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
/// Set number of iterations for the specified loop.
void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
/// Get number of iterations for all the loops.
ArrayRef<Expr *> getLoopNumIterations() const;
/// Set loop counter for the specified loop.
void setLoopCounter(unsigned NumLoop, Expr *Counter);
/// Get loops counter for the specified loop.
Expr *getLoopCounter(unsigned NumLoop);
const Expr *getLoopCounter(unsigned NumLoop) const;
// The only child exposed is the optional loop-count expression.
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
const_child_range children() const {
return const_child_range(&NumForLoops, &NumForLoops + 1);
}
// used_children is empty for this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_ordered;
}
};
/// Represents the OpenMP 'nowait' clause appearing in a '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp for nowait
/// \endcode
/// Here the '#pragma omp for' directive carries a 'nowait' clause.
class OMPNowaitClause : public OMPClause {
public:
  /// Build a 'nowait' clause covering the range [\p StartLoc, \p EndLoc].
  OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nowait, StartLoc, EndLoc) {}

  /// Build an empty 'nowait' clause.
  OMPNowaitClause()
      : OMPClause(OMPC_nowait, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'nowait' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nowait;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'untied' clause appearing in a '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp task untied
/// \endcode
/// Here the '#pragma omp task' directive carries an 'untied' clause.
class OMPUntiedClause : public OMPClause {
public:
  /// Build an 'untied' clause covering the range [\p StartLoc, \p EndLoc].
  OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_untied, StartLoc, EndLoc) {}

  /// Build an empty 'untied' clause.
  OMPUntiedClause()
      : OMPClause(OMPC_untied, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is an 'untied' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_untied;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'mergeable' clause appearing in a
/// '#pragma omp ...' directive.
///
/// \code
/// #pragma omp task mergeable
/// \endcode
/// Here the '#pragma omp task' directive carries a 'mergeable' clause.
class OMPMergeableClause : public OMPClause {
public:
  /// Build a 'mergeable' clause covering the range [\p StartLoc, \p EndLoc].
  OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_mergeable, StartLoc, EndLoc) {}

  /// Build an empty 'mergeable' clause.
  OMPMergeableClause()
      : OMPClause(OMPC_mergeable, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'mergeable' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_mergeable;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'read' clause of the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic read
/// \endcode
/// Here the '#pragma omp atomic' directive carries a 'read' clause.
class OMPReadClause : public OMPClause {
public:
  /// Build a 'read' clause covering the range [\p StartLoc, \p EndLoc].
  OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_read, StartLoc, EndLoc) {}

  /// Build an empty 'read' clause.
  OMPReadClause() : OMPClause(OMPC_read, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'read' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_read;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'write' clause of the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic write
/// \endcode
/// Here the '#pragma omp atomic' directive carries a 'write' clause.
class OMPWriteClause : public OMPClause {
public:
  /// Build a 'write' clause covering the range [\p StartLoc, \p EndLoc].
  OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_write, StartLoc, EndLoc) {}

  /// Build an empty 'write' clause.
  OMPWriteClause()
      : OMPClause(OMPC_write, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'write' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_write;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'update' clause of the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic update
/// \endcode
/// Here the '#pragma omp atomic' directive carries an 'update' clause.
class OMPUpdateClause : public OMPClause {
public:
  /// Build an 'update' clause covering the range [\p StartLoc, \p EndLoc].
  OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_update, StartLoc, EndLoc) {}

  /// Build an empty 'update' clause.
  OMPUpdateClause()
      : OMPClause(OMPC_update, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is an 'update' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_update;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'capture' clause of the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// Here the '#pragma omp atomic' directive carries a 'capture' clause.
class OMPCaptureClause : public OMPClause {
public:
  /// Build a 'capture' clause covering the range [\p StartLoc, \p EndLoc].
  OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_capture, StartLoc, EndLoc) {}

  /// Build an empty 'capture' clause.
  OMPCaptureClause()
      : OMPClause(OMPC_capture, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'capture' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_capture;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// Represents the OpenMP 'seq_cst' clause of the '#pragma omp atomic'
/// directive.
///
/// \code
/// #pragma omp atomic seq_cst
/// \endcode
/// Here the '#pragma omp atomic' directive carries a 'seq_cst' clause.
class OMPSeqCstClause : public OMPClause {
public:
  /// Build a 'seq_cst' clause covering the range [\p StartLoc, \p EndLoc].
  OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_seq_cst, StartLoc, EndLoc) {}

  /// Build an empty 'seq_cst' clause.
  OMPSeqCstClause()
      : OMPClause(OMPC_seq_cst, SourceLocation(), SourceLocation()) {}

  /// RTTI support: true iff \p T is a 'seq_cst' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_seq_cst;
  }

  // The clause owns no expressions, so every child range is empty.
  child_range children() { return {child_iterator(), child_iterator()}; }

  const_child_range children() const {
    return {const_child_iterator(), const_child_iterator()};
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// This represents clause 'private' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel private(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
// Trailing storage holds 2 * varlist_size() Expr* values: the original
// variable references (the var list), immediately followed by the
// references to the generated private copies (getPrivateCopies() starts
// reading at varlist_end()).
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPPrivateClause(unsigned N)
: OMPVarListClause<OMPPrivateClause>(OMPC_private, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
// One copy per variable, stored directly after the var list.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PrivateVL List of references to private copies with initializers.
static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PrivateVL);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
/// Range over the helper private-copy expressions (one per variable).
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
// children() exposes only the original variable references; the private
// copies are helper expressions and are not traversed here.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPPrivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// used_children is empty for this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_private;
}
};
/// This represents clause 'firstprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel firstprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
public OMPClauseWithPreInit,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
// Trailing storage holds 3 * varlist_size() Expr* values, laid out as:
//   [0, N)   original variable references (the var list);
//   [N, 2N)  private copies (getPrivateCopies() starts at varlist_end());
//   [2N, 3N) initializer expressions (getInits() starts at
//            getPrivateCopies().end()).
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(OMPC_firstprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPreInit(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFirstprivateClause(unsigned N)
: OMPVarListClause<OMPFirstprivateClause>(
OMPC_firstprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPreInit(this) {}
/// Sets the list of references to private copies with initializers for
/// new private variables.
/// \param VL List of references.
void setPrivateCopies(ArrayRef<Expr *> VL);
/// Gets the list of references to private copies with initializers for
/// new private variables.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Sets the list of references to initializer variables for new
/// private variables.
/// \param VL List of references.
void setInits(ArrayRef<Expr *> VL);
/// Gets the list of references to initializer variables for new
/// private variables.
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the original variables.
/// \param PrivateVL List of references to private copies with initializers.
/// \param InitVL List of references to auto generated variables used for
/// initialization of a single array element. Used if firstprivate variable is
/// of array type.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
static OMPFirstprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
ArrayRef<Expr *> InitVL, Stmt *PreInit);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
using private_copies_range = llvm::iterator_range<private_copies_iterator>;
using private_copies_const_range =
llvm::iterator_range<private_copies_const_iterator>;
/// Range over the helper private-copy expressions (one per variable).
private_copies_range private_copies() {
return private_copies_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
private_copies_const_range private_copies() const {
return private_copies_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
/// Range over the helper initializer expressions (one per variable).
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
// children() exposes only the original variable references; the private
// copies and initializers are helper expressions and are not traversed here.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFirstprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// used_children is empty for this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_firstprivate;
}
};
/// This represents clause 'lastprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd lastprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class,
// each of varlist_size() elements, stored back-to-back after the var list
// (trailing storage therefore holds 5 * varlist_size() Expr* in total):
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of
// lastprivate copies.
// 2. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents private variables
// (for arrays, single array element).
// 3. List of helper expressions for proper generation of assignment operation
// required for lastprivate clause. This list represents original variables
// (for arrays, single array element).
// 4. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPLastprivateClause>(OMPC_lastprivate, StartLoc,
LParenLoc, EndLoc, N),
OMPClauseWithPostUpdate(this) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPLastprivateClause(unsigned N)
: OMPVarListClause<OMPLastprivateClause>(
OMPC_lastprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N),
OMPClauseWithPostUpdate(this) {}
/// Get the list of helper expressions for initialization of private
/// copies for lastprivate variables.
// Array 1: starts directly after the var list.
MutableArrayRef<Expr *> getPrivateCopies() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivateCopies() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent private variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
// Array 2: starts after the private copies.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent original variables (for arrays, single
/// array element) in the final assignment statement performed by the
/// lastprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
// Array 3: starts after the source expressions.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign private copy of the variable to original variable.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
// Array 4: starts after the destination expressions.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// private variables (for arrays, single array element).
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for lastprivate clause. This list represents
/// original variables (for arrays, single array element).
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// lastprivate clause.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLastprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Set list of helper expressions, required for generation of private
/// copies of original lastprivate variables.
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies);
helper_expr_const_range private_copies() const {
return helper_expr_const_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_range private_copies() {
return helper_expr_range(getPrivateCopies().begin(),
getPrivateCopies().end());
}
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
// children() exposes only the original variable references; the four helper
// arrays are not traversed here.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLastprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
// used_children is empty for this clause.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_lastprivate;
}
};
/// Represents clause 'shared' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel shared(a,b)
/// \endcode
/// Here the '#pragma omp parallel' directive carries a 'shared' clause
/// naming the variables 'a' and 'b'.
class OMPSharedClause final
    : public OMPVarListClause<OMPSharedClause>,
      private llvm::TrailingObjects<OMPSharedClause, Expr *> {
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build a clause with room for \a N variables.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc, unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, StartLoc, LParenLoc,
                                          EndLoc, N) {}

  /// Build an empty clause with \a N variable slots.
  explicit OMPSharedClause(unsigned N)
      : OMPVarListClause<OMPSharedClause>(OMPC_shared, SourceLocation(),
                                          SourceLocation(), SourceLocation(),
                                          N) {}

public:
  /// Creates a clause for the variable list \a VL.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param VL List of references to the variables.
  static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variable slots.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// RTTI support: true iff \p T is a 'shared' clause.
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_shared;
  }

  // The children are exactly the variable references held in the var list.
  child_range children() {
    Stmt **Begin = reinterpret_cast<Stmt **>(varlist_begin());
    Stmt **End = reinterpret_cast<Stmt **>(varlist_end());
    return child_range(Begin, End);
  }

  const_child_range children() const {
    auto MutableChildren = const_cast<OMPSharedClause *>(this)->children();
    return const_child_range(MutableChildren.begin(), MutableChildren.end());
  }

  child_range used_children() { return {child_iterator(), child_iterator()}; }

  const_child_range used_children() const {
    return {const_child_iterator(), const_child_iterator()};
  }
};
/// This represents clause 'reduction' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp parallel reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
class OMPReductionClause final
    : public OMPVarListClause<OMPReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N,
                     NestedNameSpecifierLoc QualifierLoc,
                     const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, StartLoc,
                                             LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPReductionClause(unsigned N)
      : OMPVarListClause<OMPReductionClause>(OMPC_reduction, SourceLocation(),
                                             SourceLocation(), SourceLocation(),
                                             N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  // Trailing-storage layout: after the variable list come four helper lists,
  // each varlist_size() elements long, in this order:
  //   Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[]
  // Each getter below computes its slice by chaining off the previous list's
  // end(), so the ORDER of these getters encodes the memory layout — keep it
  // in sync with Create().

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents LHSs of the reduction
  /// expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents RHSs of the reduction
  /// expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // The variable list is stored as Expr*; the generic child-iteration
  // interface traffics in Stmt**, hence the reinterpret_cast.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_reduction;
  }
};
/// This represents clause 'task_reduction' in the '#pragma omp taskgroup'
/// directives.
///
/// \code
/// #pragma omp taskgroup task_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp taskgroup' has clause
/// 'task_reduction' with operator '+' and the variables 'a' and 'b'.
class OMPTaskReductionClause final
    : public OMPVarListClause<OMPTaskReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                         SourceLocation ColonLoc, SourceLocation EndLoc,
                         unsigned N, NestedNameSpecifierLoc QualifierLoc,
                         const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPTaskReductionClause>(OMPC_task_reduction, StartLoc,
                                                 LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPTaskReductionClause(unsigned N)
      : OMPVarListClause<OMPTaskReductionClause>(
            OMPC_task_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  // Trailing-storage layout: after the variable list come four helper lists,
  // each varlist_size() elements long, in this order:
  //   Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[]
  // Each getter below slices off the previous list's end(), so the getter
  // order encodes the memory layout — keep it in sync with Create().

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents LHSs of the reduction
  /// expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents RHSs of the reduction
  /// expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPTaskReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }

  // The variable list is stored as Expr*; the generic child-iteration
  // interface traffics in Stmt**, hence the reinterpret_cast.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPTaskReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_task_reduction;
  }
};
/// This represents clause 'in_reduction' in the '#pragma omp task' directives.
///
/// \code
/// #pragma omp task in_reduction(+:a,b)
/// \endcode
/// In this example directive '#pragma omp task' has clause 'in_reduction' with
/// operator '+' and the variables 'a' and 'b'.
class OMPInReductionClause final
    : public OMPVarListClause<OMPInReductionClause>,
      public OMPClauseWithPostUpdate,
      private llvm::TrailingObjects<OMPInReductionClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Location of ':'.
  SourceLocation ColonLoc;

  /// Nested name specifier for C++.
  NestedNameSpecifierLoc QualifierLoc;

  /// Name of custom operator.
  DeclarationNameInfo NameInfo;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param ColonLoc Location of ':'.
  /// \param N Number of the variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation ColonLoc, SourceLocation EndLoc,
                       unsigned N, NestedNameSpecifierLoc QualifierLoc,
                       const DeclarationNameInfo &NameInfo)
      : OMPVarListClause<OMPInReductionClause>(OMPC_in_reduction, StartLoc,
                                               LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc),
        QualifierLoc(QualifierLoc), NameInfo(NameInfo) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPInReductionClause(unsigned N)
      : OMPVarListClause<OMPInReductionClause>(
            OMPC_in_reduction, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Sets location of ':' symbol in clause.
  void setColonLoc(SourceLocation CL) { ColonLoc = CL; }

  /// Sets the name info for specified reduction identifier.
  void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; }

  /// Sets the nested name specifier.
  void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; }

  // Trailing-storage layout: after the variable list come five helper lists,
  // each varlist_size() elements long, in this order:
  //   Privates[]; LHSExprs[]; RHSExprs[]; ReductionOps[];
  //   TaskgroupDescriptors[]
  // Each getter below slices off the previous list's end(), so the getter
  // order encodes the memory layout — keep it in sync with Create().

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private copy of the reduction
  /// variable.
  void setPrivates(ArrayRef<Expr *> Privates);

  /// Get the list of helper privates.
  MutableArrayRef<Expr *> getPrivates() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivates() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent LHS expression in the final
  /// reduction expression performed by the reduction clause.
  void setLHSExprs(ArrayRef<Expr *> LHSExprs);

  /// Get the list of helper LHS expressions.
  MutableArrayRef<Expr *> getLHSExprs() {
    return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
  }
  ArrayRef<const Expr *> getLHSExprs() const {
    return llvm::makeArrayRef(getPrivates().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent RHS expression in the final
  /// reduction expression performed by the reduction clause. Also, variables
  /// in these expressions are used for proper initialization of reduction
  /// copies.
  void setRHSExprs(ArrayRef<Expr *> RHSExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getRHSExprs() {
    return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getRHSExprs() const {
    return llvm::makeArrayRef(getLHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction expressions, required for proper
  /// codegen of the clause. These expressions are binary expressions or
  /// operator/custom reduction call that calculates new value from source
  /// helper expressions to destination helper expressions.
  void setReductionOps(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction expressions.
  MutableArrayRef<Expr *> getReductionOps() {
    return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getReductionOps() const {
    return llvm::makeArrayRef(getRHSExprs().end(), varlist_size());
  }

  /// Set list of helper reduction taskgroup descriptors.
  void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps);

  /// Get the list of helper reduction taskgroup descriptors.
  MutableArrayRef<Expr *> getTaskgroupDescriptors() {
    return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size());
  }
  ArrayRef<const Expr *> getTaskgroupDescriptors() const {
    return llvm::makeArrayRef(getReductionOps().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param ColonLoc Location of ':'.
  /// \param EndLoc Ending location of the clause.
  /// \param VL The variables in the clause.
  /// \param QualifierLoc The nested-name qualifier with location information
  /// \param NameInfo The full name info for reduction identifier.
  /// \param Privates List of helper expressions for proper generation of
  /// private copies.
  /// \param LHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents LHSs of the reduction
  /// expressions.
  /// \param RHSExprs List of helper expressions for proper generation of the
  /// final reduction expressions. This list represents RHSs of the reduction
  /// expressions.
  /// Also, variables in these expressions are used for proper initialization
  /// of reduction copies.
  /// \param ReductionOps List of helper expressions that represents reduction
  /// expressions:
  /// \code
  /// LHSExprs binop RHSExprs;
  /// operator binop(LHSExpr, RHSExpr);
  /// <CustomReduction>(LHSExpr, RHSExpr);
  /// \endcode
  /// Required for proper codegen of final reduction operation performed by
  /// the reduction clause.
  /// \param TaskgroupDescriptors List of helper taskgroup descriptors for
  /// corresponding items in parent taskgroup task_reduction clause.
  /// \param PreInit Statement that must be executed before entering the
  /// OpenMP region with this clause.
  /// \param PostUpdate Expression that must be executed after exit from the
  /// OpenMP region with this clause.
  static OMPInReductionClause *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
         SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
         NestedNameSpecifierLoc QualifierLoc,
         const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates,
         ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs,
         ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors,
         Stmt *PreInit, Expr *PostUpdate);

  /// Creates an empty clause with the place for \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N);

  /// Gets location of ':' symbol in clause.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Gets the name info for specified reduction identifier.
  const DeclarationNameInfo &getNameInfo() const { return NameInfo; }

  /// Gets the nested name specifier.
  NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }

  using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
  using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
  using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
  using helper_expr_const_range =
      llvm::iterator_range<helper_expr_const_iterator>;

  helper_expr_const_range privates() const {
    return helper_expr_const_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_range privates() {
    return helper_expr_range(getPrivates().begin(), getPrivates().end());
  }
  helper_expr_const_range lhs_exprs() const {
    return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_range lhs_exprs() {
    return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end());
  }
  helper_expr_const_range rhs_exprs() const {
    return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_range rhs_exprs() {
    return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end());
  }
  helper_expr_const_range reduction_ops() const {
    return helper_expr_const_range(getReductionOps().begin(),
                                   getReductionOps().end());
  }
  helper_expr_range reduction_ops() {
    return helper_expr_range(getReductionOps().begin(),
                             getReductionOps().end());
  }
  helper_expr_const_range taskgroup_descriptors() const {
    return helper_expr_const_range(getTaskgroupDescriptors().begin(),
                                   getTaskgroupDescriptors().end());
  }
  helper_expr_range taskgroup_descriptors() {
    return helper_expr_range(getTaskgroupDescriptors().begin(),
                             getTaskgroupDescriptors().end());
  }

  // The variable list is stored as Expr*; the generic child-iteration
  // interface traffics in Stmt**, hence the reinterpret_cast.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPInReductionClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_in_reduction;
  }
};
/// This represents clause 'linear' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd linear(a,b : 2)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
public OMPClauseWithPostUpdate,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val;
/// Location of linear modifier if any.
SourceLocation ModifierLoc;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the linear step for clause.
void setStep(Expr *Step) { *(getFinals().end()) = Step; }
/// Sets the expression to calculate linear step for clause.
void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; }
/// Build 'linear' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, StartLoc, LParenLoc,
EndLoc, NumVars),
OMPClauseWithPostUpdate(this), Modifier(Modifier),
ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPLinearClause(unsigned NumVars)
: OMPVarListClause<OMPLinearClause>(OMPC_linear, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars),
OMPClauseWithPostUpdate(this) {}
/// Gets the list of initial values for linear variables.
///
/// There are NumVars expressions with initial values allocated after the
/// varlist, they are followed by NumVars update expressions (used to update
/// the linear variable's value on current iteration) and they are followed by
/// NumVars final expressions (used to calculate the linear variable's
/// value after the loop body). After these lists, there are 2 helper
/// expressions - linear step and a helper to calculate it before the
/// loop body (used when the linear step is not constant):
///
/// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[];
/// Finals[]; Step; CalcStep; }
MutableArrayRef<Expr *> getPrivates() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getPrivates() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
MutableArrayRef<Expr *> getInits() {
return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size());
}
ArrayRef<const Expr *> getInits() const {
return llvm::makeArrayRef(getPrivates().end(), varlist_size());
}
/// Sets the list of update expressions for linear variables.
MutableArrayRef<Expr *> getUpdates() {
return MutableArrayRef<Expr *>(getInits().end(), varlist_size());
}
ArrayRef<const Expr *> getUpdates() const {
return llvm::makeArrayRef(getInits().end(), varlist_size());
}
/// Sets the list of final update expressions for linear variables.
MutableArrayRef<Expr *> getFinals() {
return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size());
}
ArrayRef<const Expr *> getFinals() const {
return llvm::makeArrayRef(getUpdates().end(), varlist_size());
}
/// Sets the list of the copies of original linear variables.
/// \param PL List of expressions.
void setPrivates(ArrayRef<Expr *> PL);
/// Sets the list of the initial values for linear variables.
/// \param IL List of expressions.
void setInits(ArrayRef<Expr *> IL);
public:
/// Creates clause with a list of variables \a VL and a linear step
/// \a Step.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param Modifier Modifier of 'linear' clause.
/// \param ModifierLoc Modifier location.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param PL List of private copies of original variables.
/// \param IL List of initial values for the variables.
/// \param Step Linear step.
/// \param CalcStep Calculation of the linear step.
/// \param PreInit Statement that must be executed before entering the OpenMP
/// region with this clause.
/// \param PostUpdate Expression that must be executed after exit from the
/// OpenMP region with this clause.
static OMPLinearClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc,
SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL,
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep,
Stmt *PreInit, Expr *PostUpdate);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Set modifier.
void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; }
/// Return modifier.
OpenMPLinearClauseKind getModifier() const { return Modifier; }
/// Set modifier location.
void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }
/// Return modifier location.
SourceLocation getModifierLoc() const { return ModifierLoc; }
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns linear step.
Expr *getStep() { return *(getFinals().end()); }
/// Returns linear step.
const Expr *getStep() const { return *(getFinals().end()); }
/// Returns expression to calculate linear step.
Expr *getCalcStep() { return *(getFinals().end() + 1); }
/// Returns expression to calculate linear step.
const Expr *getCalcStep() const { return *(getFinals().end() + 1); }
/// Sets the list of update expressions for linear variables.
/// \param UL List of expressions.
void setUpdates(ArrayRef<Expr *> UL);
/// Sets the list of final update expressions for linear variables.
/// \param FL List of expressions.
void setFinals(ArrayRef<Expr *> FL);
// Iterator/range helpers over the private copies created for each linear
// variable.
using privates_iterator = MutableArrayRef<Expr *>::iterator;
using privates_const_iterator = ArrayRef<const Expr *>::iterator;
using privates_range = llvm::iterator_range<privates_iterator>;
using privates_const_range = llvm::iterator_range<privates_const_iterator>;
/// Returns the range of helper expressions for the private copies.
privates_range privates() {
return privates_range(getPrivates().begin(), getPrivates().end());
}
privates_const_range privates() const {
return privates_const_range(getPrivates().begin(), getPrivates().end());
}
// Iterator/range helpers over the initializer expressions of the linear
// variables.
using inits_iterator = MutableArrayRef<Expr *>::iterator;
using inits_const_iterator = ArrayRef<const Expr *>::iterator;
using inits_range = llvm::iterator_range<inits_iterator>;
using inits_const_range = llvm::iterator_range<inits_const_iterator>;
/// Returns the range of initializer expressions.
inits_range inits() {
return inits_range(getInits().begin(), getInits().end());
}
inits_const_range inits() const {
return inits_const_range(getInits().begin(), getInits().end());
}
// Iterator/range helpers over the per-iteration update expressions.
using updates_iterator = MutableArrayRef<Expr *>::iterator;
using updates_const_iterator = ArrayRef<const Expr *>::iterator;
using updates_range = llvm::iterator_range<updates_iterator>;
using updates_const_range = llvm::iterator_range<updates_const_iterator>;
/// Returns the range of update expressions.
updates_range updates() {
return updates_range(getUpdates().begin(), getUpdates().end());
}
updates_const_range updates() const {
return updates_const_range(getUpdates().begin(), getUpdates().end());
}
// Iterator/range helpers over the final-value expressions.
using finals_iterator = MutableArrayRef<Expr *>::iterator;
using finals_const_iterator = ArrayRef<const Expr *>::iterator;
using finals_range = llvm::iterator_range<finals_iterator>;
using finals_const_range = llvm::iterator_range<finals_const_iterator>;
/// Returns the range of final-update expressions.
finals_range finals() {
return finals_range(getFinals().begin(), getFinals().end());
}
finals_const_range finals() const {
return finals_const_range(getFinals().begin(), getFinals().end());
}
/// Children of the clause: the variable list, reinterpreted as Stmt*
/// (Expr derives from Stmt, so this cast is the standard clause idiom).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPLinearClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: this clause reports no "used" children.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_linear;
}
};
/// This represents clause 'aligned' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp simd aligned(a,b : 8)
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Location of ':'.
SourceLocation ColonLoc;
/// Sets the alignment for clause.
///
/// The alignment expression occupies the single extra trailing slot right
/// after the variable list, hence the write through varlist_end().
void setAlignment(Expr *A) { *varlist_end() = A; }
/// Build 'aligned' clause with given number of variables \a NumVars.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param NumVars Number of variables.
OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, StartLoc, LParenLoc,
EndLoc, NumVars),
ColonLoc(ColonLoc) {}
/// Build an empty clause.
///
/// \param NumVars Number of variables.
explicit OMPAlignedClause(unsigned NumVars)
: OMPVarListClause<OMPAlignedClause>(OMPC_aligned, SourceLocation(),
SourceLocation(), SourceLocation(),
NumVars) {}
public:
/// Creates clause with a list of variables \a VL and alignment \a A.
///
/// \param C AST Context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param ColonLoc Location of ':'.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param A Alignment.
static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL,
Expr *A);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param NumVars Number of variables.
static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars);
/// Sets the location of ':'.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
/// Returns the location of ':'.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Returns alignment.
///
/// Reads back the trailing slot written by setAlignment().
Expr *getAlignment() { return *varlist_end(); }
/// Returns alignment.
const Expr *getAlignment() const { return *varlist_end(); }
/// Children: the variable list as Stmt* (excludes the alignment slot).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPAlignedClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_aligned;
}
};
/// This represents clause 'copyin' in the '#pragma omp ...' directives.
///
/// \code
/// #pragma omp parallel copyin(a,b)
/// \endcode
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.
// 2. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents destinations.
// 3. List of helper expressions that represents assignment operation:
// \code
// DstExprs = SrcExprs;
// \endcode
// Required for proper codegen of propagation of master's thread values of
// threadprivate variables to local instances of that variables in other
// implicit threads.
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyinClause(unsigned N)
: OMPVarListClause<OMPCopyinClause>(OMPC_copyin, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyin clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
///
/// Sources occupy the first tail array, starting right after the variable
/// list (varlist_end()); each tail array has varlist_size() elements.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyin clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
///
/// Destinations are stored immediately after the sources.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
///
/// Assignment ops are stored immediately after the destinations.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyin clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of propagation of master's thread values of
/// threadprivate variables to local instances of that variables in other
/// implicit threads.
static OMPCopyinClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N);
// Iterator/range helpers shared by the three helper-expression lists.
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Range over the helper source expressions.
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
/// Range over the helper destination expressions.
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
/// Range over the helper assignment expressions.
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
/// Children: the variable list as Stmt* (helper arrays excluded).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyinClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyin;
}
};
/// This represents clause 'copyprivate' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp single copyprivate(a,b)
/// \endcode
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
// Three tail-allocated helper arrays follow the variable list, in order:
// sources, destinations, assignment ops (same layout as OMPCopyinClause).
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(OMPC_copyprivate, StartLoc,
LParenLoc, EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPCopyprivateClause(unsigned N)
: OMPVarListClause<OMPCopyprivateClause>(
OMPC_copyprivate, SourceLocation(), SourceLocation(),
SourceLocation(), N) {}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent source expression in the final
/// assignment statement performed by the copyprivate clause.
void setSourceExprs(ArrayRef<Expr *> SrcExprs);
/// Get the list of helper source expressions.
///
/// Sources start right after the variable list (varlist_end()); each
/// helper array has varlist_size() elements.
MutableArrayRef<Expr *> getSourceExprs() {
return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
}
ArrayRef<const Expr *> getSourceExprs() const {
return llvm::makeArrayRef(varlist_end(), varlist_size());
}
/// Set list of helper expressions, required for proper codegen of the
/// clause. These expressions represent destination expression in the final
/// assignment statement performed by the copyprivate clause.
void setDestinationExprs(ArrayRef<Expr *> DstExprs);
/// Get the list of helper destination expressions.
///
/// Destinations are stored immediately after the sources.
MutableArrayRef<Expr *> getDestinationExprs() {
return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getDestinationExprs() const {
return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
}
/// Set list of helper assignment expressions, required for proper
/// codegen of the clause. These expressions are assignment expressions that
/// assign source helper expressions to destination helper expressions
/// correspondingly.
void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);
/// Get the list of helper assignment expressions.
///
/// Assignment ops are stored immediately after the destinations.
MutableArrayRef<Expr *> getAssignmentOps() {
return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size());
}
ArrayRef<const Expr *> getAssignmentOps() const {
return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size());
}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
/// \param SrcExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// sources.
/// \param DstExprs List of helper expressions for proper generation of
/// assignment operation required for copyprivate clause. This list represents
/// destinations.
/// \param AssignmentOps List of helper expressions that represents assignment
/// operation:
/// \code
/// DstExprs = SrcExprs;
/// \endcode
/// Required for proper codegen of final assignment performed by the
/// copyprivate clause.
static OMPCopyprivateClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N);
// Iterator/range helpers shared by the three helper-expression lists.
using helper_expr_iterator = MutableArrayRef<Expr *>::iterator;
using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator;
using helper_expr_range = llvm::iterator_range<helper_expr_iterator>;
using helper_expr_const_range =
llvm::iterator_range<helper_expr_const_iterator>;
/// Range over the helper source expressions.
helper_expr_const_range source_exprs() const {
return helper_expr_const_range(getSourceExprs().begin(),
getSourceExprs().end());
}
helper_expr_range source_exprs() {
return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end());
}
/// Range over the helper destination expressions.
helper_expr_const_range destination_exprs() const {
return helper_expr_const_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
helper_expr_range destination_exprs() {
return helper_expr_range(getDestinationExprs().begin(),
getDestinationExprs().end());
}
/// Range over the helper assignment expressions.
helper_expr_const_range assignment_ops() const {
return helper_expr_const_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
helper_expr_range assignment_ops() {
return helper_expr_range(getAssignmentOps().begin(),
getAssignmentOps().end());
}
/// Children: the variable list as Stmt* (helper arrays excluded).
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPCopyprivateClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_copyprivate;
}
};
/// This represents implicit clause 'flush' for the '#pragma omp flush'
/// directive.
/// This clause does not exist by itself, it can be only as a part of 'omp
/// flush' directive. This clause is introduced to keep the original structure
/// of \a OMPExecutableDirective class and its derivatives and to use the
/// existing infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, StartLoc, LParenLoc,
EndLoc, N) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPFlushClause(unsigned N)
: OMPVarListClause<OMPFlushClause>(OMPC_flush, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param VL List of references to the variables.
static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
ArrayRef<Expr *> VL);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);
/// Children: the flushed variable list as Stmt*.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPFlushClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_flush;
}
};
/// This represents implicit clause 'depend' for the '#pragma omp task'
/// directive.
///
/// \code
/// #pragma omp task depend(in:a,b)
/// \endcode
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend class OMPClauseReader;
friend OMPVarListClause;
friend TrailingObjects;
/// Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
/// Dependency type location.
SourceLocation DepLoc;
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N), NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
/// Set dependency kind and its location.
void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; }
/// Set colon location.
void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
/// Creates clause with a list of variables \a VL.
///
/// \param C AST context.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param DepKind Dependency type.
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
/// Get dependency type location.
SourceLocation getDependencyLoc() const { return DepLoc; }
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
/// \param NumLoop Zero-based loop index; \param Cnt the loop counter expr.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
/// Children: the dependency variable list as Stmt*.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
const_child_range children() const {
auto Children = const_cast<OMPDependClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_depend;
}
};
/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
friend class OMPClauseReader;
/// Location of '('.
SourceLocation LParenLoc;
/// Device number.  Stored as Stmt* so children() can hand out a range over
/// the member directly.
Stmt *Device = nullptr;
/// Set the device number.
///
/// \param E Device number.
void setDevice(Expr *E) { Device = E; }
public:
/// Build 'device' clause.
///
/// \param E Expression associated with this clause.
/// \param CaptureRegion Innermost OpenMP region where expressions in this
/// clause must be captured.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPDeviceClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc)
: OMPClause(OMPC_device, StartLoc, EndLoc), OMPClauseWithPreInit(this),
LParenLoc(LParenLoc), Device(E) {
setPreInitStmt(HelperE, CaptureRegion);
}
/// Build an empty clause.
OMPDeviceClause()
: OMPClause(OMPC_device, SourceLocation(), SourceLocation()),
OMPClauseWithPreInit(this) {}
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
/// Returns the location of '('.
SourceLocation getLParenLoc() const { return LParenLoc; }
/// Return device number.
Expr *getDevice() { return cast<Expr>(Device); }
/// Return device number.
// NOTE(review): the const overload returns a non-const Expr* — confirm
// this mirrors the rest of the clause hierarchy intentionally.
Expr *getDevice() const { return cast<Expr>(Device); }
/// Children: the single device-number expression.
child_range children() { return child_range(&Device, &Device + 1); }
const_child_range children() const {
return const_child_range(&Device, &Device + 1);
}
/// Empty range: no "used" children are reported.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_device;
}
};
/// This represents 'threads' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered threads
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'threads' clause.
class OMPThreadsClause : public OMPClause {
public:
/// Build 'threads' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_threads, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPThreadsClause()
: OMPClause(OMPC_threads, SourceLocation(), SourceLocation()) {}
/// The clause carries no expressions, so all child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_threads;
}
};
/// This represents 'simd' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp ordered simd
/// \endcode
/// In this example directive '#pragma omp ordered' has simple 'simd' clause.
class OMPSIMDClause : public OMPClause {
public:
/// Build 'simd' clause.
///
/// \param StartLoc Starting location of the clause.
/// \param EndLoc Ending location of the clause.
OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPClause(OMPC_simd, StartLoc, EndLoc) {}
/// Build an empty clause.
OMPSIMDClause() : OMPClause(OMPC_simd, SourceLocation(), SourceLocation()) {}
/// The clause carries no expressions, so all child ranges are empty.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI support.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_simd;
}
};
/// Struct that defines common infrastructure to handle mappable
/// expressions used in OpenMP clauses.
class OMPClauseMappableExprCommon {
public:
/// Class that represents a component of a mappable expression. E.g.
/// for an expression S.a, the first component is a declaration reference
/// expression associated with 'S' and the second is a member expression
/// associated with the field declaration 'a'. If the expression is an array
/// subscript it may not have any associated declaration. In that case the
/// associated declaration is set to nullptr.
class MappableComponent {
/// Expression associated with the component.
Expr *AssociatedExpression = nullptr;
/// Declaration associated with the declaration. If the component does
/// not have a declaration (e.g. array subscripts or section), this is set
/// to nullptr.
ValueDecl *AssociatedDeclaration = nullptr;
public:
explicit MappableComponent() = default;
/// Builds a component; the declaration (if any) is canonicalized so that
/// components of the same entity compare equal across redeclarations.
explicit MappableComponent(Expr *AssociatedExpression,
ValueDecl *AssociatedDeclaration)
: AssociatedExpression(AssociatedExpression),
AssociatedDeclaration(
AssociatedDeclaration
? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
: nullptr) {}
Expr *getAssociatedExpression() const { return AssociatedExpression; }
ValueDecl *getAssociatedDeclaration() const {
return AssociatedDeclaration;
}
};
// List of components of an expression. This first one is the whole
// expression and the last one is the base expression.
using MappableExprComponentList = SmallVector<MappableComponent, 8>;
using MappableExprComponentListRef = ArrayRef<MappableComponent>;
// List of all component lists associated to the same base declaration.
// E.g. if both 'S.a' and 'S.b' are a mappable expressions, each will have
// their component list but the same base declaration 'S'.
using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;
protected:
// Return the total number of elements in a list of component lists.
static unsigned
getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);
// Return the total number of elements in a list of declarations. All
// declarations are expected to be canonical.
static unsigned
getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};
/// This structure contains all sizes needed for by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
/// Number of expressions listed.
unsigned NumVars;
/// Number of unique base declarations.
unsigned NumUniqueDeclarations;
/// Number of component lists.
unsigned NumComponentLists;
/// Total number of expression components.
unsigned NumComponents;
// NOTE(review): the defaulted constructor leaves all four counters
// uninitialized — callers must assign every field before use; confirm
// no default-constructed instance is read unfilled.
OMPMappableExprListSizeTy() = default;
OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
unsigned NumComponentLists, unsigned NumComponents)
: NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};
/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;
  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;
  /// Number of component lists in this clause.
  unsigned NumComponentLists;
  /// Total number of components in this clause.
  unsigned NumComponents;
  /// C++ nested name specifier for the associated user-defined mapper.
  NestedNameSpecifierLoc MapperQualifierLoc;
  /// The associated user-defined mapper identifier information.
  DeclarationNameInfo MapperIdInfo;
protected:
  /// Build a clause for \a NumUniqueDeclarations declarations, \a
  /// NumComponentLists total component lists, and \a NumComponents total
  /// components.
  ///
  /// \param K Kind of the clause.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  /// \param MapperQualifierLocPtr C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfoPtr The identifier of associated user-defined mapper.
  OMPMappableExprListClause(
      OpenMPClauseKind K, const OMPVarListLocTy &Locs,
      const OMPMappableExprListSizeTy &Sizes,
      NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr,
      DeclarationNameInfo *MapperIdInfoPtr = nullptr)
      : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc,
                            Sizes.NumVars),
        NumUniqueDeclarations(Sizes.NumUniqueDeclarations),
        NumComponentLists(Sizes.NumComponentLists),
        NumComponents(Sizes.NumComponents) {
    if (MapperQualifierLocPtr)
      MapperQualifierLoc = *MapperQualifierLocPtr;
    if (MapperIdInfoPtr)
      MapperIdInfo = *MapperIdInfoPtr;
  }
  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  MutableArrayRef<ValueDecl *> getUniqueDeclsRef() {
    return MutableArrayRef<ValueDecl *>(
        static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }
  /// Get the unique declarations that are in the trailing objects of the
  /// class.
  ArrayRef<ValueDecl *> getUniqueDeclsRef() const {
    return ArrayRef<ValueDecl *>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<ValueDecl *>(),
        NumUniqueDeclarations);
  }
  /// Set the unique declarations that are in the trailing objects of the
  /// class.
  void setUniqueDecls(ArrayRef<ValueDecl *> UDs) {
    assert(UDs.size() == NumUniqueDeclarations &&
           "Unexpected amount of unique declarations.");
    std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin());
  }
  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  MutableArrayRef<unsigned> getDeclNumListsRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }
  /// Get the number of lists per declaration that are in the trailing
  /// objects of the class.
  ArrayRef<unsigned> getDeclNumListsRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>(),
        NumUniqueDeclarations);
  }
  /// Set the number of lists per declaration that are in the trailing
  /// objects of the class.
  void setDeclNumLists(ArrayRef<unsigned> DNLs) {
    assert(DNLs.size() == NumUniqueDeclarations &&
           "Unexpected amount of list numbers.");
    std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin());
  }
  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  MutableArrayRef<unsigned> getComponentListSizesRef() {
    return MutableArrayRef<unsigned>(
        static_cast<T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }
  /// Get the cumulative component lists sizes that are in the trailing
  /// objects of the class. They are appended after the number of lists.
  ArrayRef<unsigned> getComponentListSizesRef() const {
    return ArrayRef<unsigned>(
        static_cast<const T *>(this)->template getTrailingObjects<unsigned>() +
            NumUniqueDeclarations,
        NumComponentLists);
  }
  /// Set the cumulative component lists sizes that are in the trailing
  /// objects of the class.
  void setComponentListSizes(ArrayRef<unsigned> CLSs) {
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of component lists.");
    std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin());
  }
  /// Get the components that are in the trailing objects of the class.
  MutableArrayRef<MappableComponent> getComponentsRef() {
    return MutableArrayRef<MappableComponent>(
        static_cast<T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }
  /// Get the components that are in the trailing objects of the class.
  ArrayRef<MappableComponent> getComponentsRef() const {
    return ArrayRef<MappableComponent>(
        static_cast<const T *>(this)
            ->template getTrailingObjects<MappableComponent>(),
        NumComponents);
  }
  /// Set the components that are in the trailing objects of the class.
  /// This requires the list sizes so that it can also fill the original
  /// expressions, which are the first component of each list.
  void setComponents(ArrayRef<MappableComponent> Components,
                     ArrayRef<unsigned> CLSs) {
    assert(Components.size() == NumComponents &&
           "Unexpected amount of component lists.");
    assert(CLSs.size() == NumComponentLists &&
           "Unexpected amount of list sizes.");
    std::copy(Components.begin(), Components.end(), getComponentsRef().begin());
  }
  /// Fill the clause information from the list of declarations and
  /// associated component lists.
  void setClauseInfo(ArrayRef<ValueDecl *> Declarations,
                     MappableExprComponentListsRef ComponentLists) {
    // Perform some checks to make sure the data sizes are consistent with the
    // information available when the clause was created.
    assert(getUniqueDeclarationsTotalNumber(Declarations) ==
               NumUniqueDeclarations &&
           "Unexpected number of mappable expression info entries!");
    assert(getComponentsTotalNumber(ComponentLists) == NumComponents &&
           "Unexpected total number of components!");
    assert(Declarations.size() == ComponentLists.size() &&
           "Declaration and component lists size is not consistent!");
    assert(Declarations.size() == NumComponentLists &&
           "Unexpected declaration and component lists size!");
    // Organize the components by declaration and retrieve the original
    // expression. Original expressions are always the first component of the
    // mappable component list.
    llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>>
        ComponentListMap;
    {
      auto CI = ComponentLists.begin();
      for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE;
           ++DI, ++CI) {
        assert(!CI->empty() && "Invalid component list!");
        ComponentListMap[*DI].push_back(*CI);
      }
    }
    // Iterators of the target storage.
    auto UniqueDeclarations = getUniqueDeclsRef();
    auto UDI = UniqueDeclarations.begin();
    auto DeclNumLists = getDeclNumListsRef();
    auto DNLI = DeclNumLists.begin();
    auto ComponentListSizes = getComponentListSizesRef();
    auto CLSI = ComponentListSizes.begin();
    auto Components = getComponentsRef();
    auto CI = Components.begin();
    // Variable to compute the accumulation of the number of components.
    unsigned PrevSize = 0u;
    // Scan all the declarations and associated component lists.
    for (auto &M : ComponentListMap) {
      // The declaration.
      auto *D = M.first;
      // The component lists.
      auto CL = M.second;
      // Initialize the entry.
      *UDI = D;
      ++UDI;
      *DNLI = CL.size();
      ++DNLI;
      // Obtain the cumulative sizes and concatenate all the components in the
      // reserved storage.
      for (auto C : CL) {
        // Accumulate with the previous size.
        PrevSize += C.size();
        // Save the size.
        *CLSI = PrevSize;
        ++CLSI;
        // Append components after the current components iterator.
        CI = std::copy(C.begin(), C.end(), CI);
      }
    }
  }
  /// Set the nested name specifier of associated user-defined mapper.
  void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) {
    MapperQualifierLoc = NNSL;
  }
  /// Set the name of associated user-defined mapper.
  void setMapperIdInfo(DeclarationNameInfo MapperId) {
    MapperIdInfo = MapperId;
  }
  /// Get the user-defined mapper references that are in the trailing objects of
  /// the class.
  MutableArrayRef<Expr *> getUDMapperRefs() {
    return llvm::makeMutableArrayRef<Expr *>(
        static_cast<T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }
  /// Get the user-defined mappers references that are in the trailing objects
  /// of the class.
  ArrayRef<Expr *> getUDMapperRefs() const {
    // Use a const cast target so this accessor is usable on const objects,
    // matching the other const trailing-object accessors above.
    return llvm::makeArrayRef<Expr *>(
        static_cast<const T *>(this)->template getTrailingObjects<Expr *>() +
            OMPVarListClause<T>::varlist_size(),
        OMPVarListClause<T>::varlist_size());
  }
  /// Set the user-defined mappers that are in the trailing objects of the
  /// class.
  void setUDMapperRefs(ArrayRef<Expr *> DMDs) {
    assert(DMDs.size() == OMPVarListClause<T>::varlist_size() &&
           "Unexpected number of user-defined mappers.");
    std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin());
  }
public:
  /// Return the number of unique base declarations in this clause.
  unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; }
  /// Return the number of lists derived from the clause expressions.
  unsigned getTotalComponentListNum() const { return NumComponentLists; }
  /// Return the total number of components in all lists derived from the
  /// clause.
  unsigned getTotalComponentsNum() const { return NumComponents; }
  /// Gets the nested name specifier for associated user-defined mapper.
  NestedNameSpecifierLoc getMapperQualifierLoc() const {
    return MapperQualifierLoc;
  }
  /// Gets the name info for associated user-defined mapper.
  const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; }
  /// Iterator that browse the components by lists. It also allows
  /// browsing components of a single declaration.
  class const_component_lists_iterator
      : public llvm::iterator_adaptor_base<
            const_component_lists_iterator,
            MappableExprComponentListRef::const_iterator,
            std::forward_iterator_tag, MappableComponent, ptrdiff_t,
            MappableComponent, MappableComponent> {
    // The declaration the iterator currently refers to.
    ArrayRef<ValueDecl *>::iterator DeclCur;
    // The list number associated with the current declaration.
    ArrayRef<unsigned>::iterator NumListsCur;
    // Remaining lists for the current declaration.
    unsigned RemainingLists = 0;
    // The cumulative size of the previous list, or zero if there is no previous
    // list.
    unsigned PrevListSize = 0;
    // The cumulative sizes of the current list - it will delimit the remaining
    // range of interest.
    ArrayRef<unsigned>::const_iterator ListSizeCur;
    ArrayRef<unsigned>::const_iterator ListSizeEnd;
    // Iterator to the end of the components storage.
    MappableExprComponentListRef::const_iterator End;
  public:
    /// Construct an iterator that scans all lists.
    explicit const_component_lists_iterator(
        ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum,
        ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator::iterator_adaptor_base(
              Components.begin()),
          DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()),
          ListSizeCur(CumulativeListSizes.begin()),
          ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) {
      assert(UniqueDecls.size() == DeclsListNum.size() &&
             "Inconsistent number of declarations and list sizes!");
      if (!DeclsListNum.empty())
        RemainingLists = *NumListsCur;
    }
    /// Construct an iterator that scan lists for a given declaration \a
    /// Declaration.
    explicit const_component_lists_iterator(
        const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls,
        ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes,
        MappableExprComponentListRef Components)
        : const_component_lists_iterator(UniqueDecls, DeclsListNum,
                                         CumulativeListSizes, Components) {
      // Look for the desired declaration. While we are looking for it, we
      // update the state so that we know the component where a given list
      // starts.
      for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) {
        if (*DeclCur == Declaration)
          break;
        assert(*NumListsCur > 0 && "No lists associated with declaration??");
        // Skip the lists associated with the current declaration, but save the
        // last list size that was skipped.
        std::advance(ListSizeCur, *NumListsCur - 1);
        PrevListSize = *ListSizeCur;
        ++ListSizeCur;
      }
      // If we didn't find any declaration, advance the iterator to after the
      // last component and set remaining lists to zero.
      if (ListSizeCur == CumulativeListSizes.end()) {
        this->I = End;
        RemainingLists = 0u;
        return;
      }
      // Set the remaining lists with the total number of lists of the current
      // declaration.
      RemainingLists = *NumListsCur;
      // Adjust the list size end iterator to the end of the relevant range.
      ListSizeEnd = ListSizeCur;
      std::advance(ListSizeEnd, RemainingLists);
      // Given that the list sizes are cumulative, the index of the component
      // that start the list is the size of the previous list.
      std::advance(this->I, PrevListSize);
    }
    // Return the array with the current list. The sizes are cumulative, so the
    // array size is the difference between the current size and previous one.
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator*() const {
      assert(ListSizeCur != ListSizeEnd && "Invalid iterator!");
      return std::make_pair(
          *DeclCur,
          MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize));
    }
    std::pair<const ValueDecl *, MappableExprComponentListRef>
    operator->() const {
      return **this;
    }
    // Skip the components of the current list.
    const_component_lists_iterator &operator++() {
      assert(ListSizeCur != ListSizeEnd && RemainingLists &&
             "Invalid iterator!");
      // If we don't have more lists just skip all the components. Otherwise,
      // advance the iterator by the number of components in the current list.
      if (std::next(ListSizeCur) == ListSizeEnd) {
        this->I = End;
        RemainingLists = 0;
      } else {
        std::advance(this->I, *ListSizeCur - PrevListSize);
        PrevListSize = *ListSizeCur;
        // We are done with a declaration, move to the next one.
        if (!(--RemainingLists)) {
          ++DeclCur;
          ++NumListsCur;
          RemainingLists = *NumListsCur;
          assert(RemainingLists && "No lists in the following declaration??");
        }
      }
      ++ListSizeCur;
      return *this;
    }
  };
  using const_component_lists_range =
      llvm::iterator_range<const_component_lists_iterator>;
  /// Iterators for all component lists.
  const_component_lists_iterator component_lists_begin() const {
    return const_component_lists_iterator(
        getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(),
        getComponentsRef());
  }
  const_component_lists_iterator component_lists_end() const {
    return const_component_lists_iterator(
        ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(),
        MappableExprComponentListRef(getComponentsRef().end(),
                                     getComponentsRef().end()));
  }
  const_component_lists_range component_lists() const {
    return {component_lists_begin(), component_lists_end()};
  }
  /// Iterators for component lists associated with the provided
  /// declaration.
  const_component_lists_iterator
  decl_component_lists_begin(const ValueDecl *VD) const {
    return const_component_lists_iterator(
        VD, getUniqueDeclsRef(), getDeclNumListsRef(),
        getComponentListSizesRef(), getComponentsRef());
  }
  const_component_lists_iterator decl_component_lists_end() const {
    return component_lists_end();
  }
  const_component_lists_range decl_component_lists(const ValueDecl *VD) const {
    return {decl_component_lists_begin(VD), decl_component_lists_end()};
  }
  /// Iterators to access all the declarations, number of lists, list sizes, and
  /// components.
  using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator;
  using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>;
  const_all_decls_range all_decls() const {
    auto A = getUniqueDeclsRef();
    return const_all_decls_range(A.begin(), A.end());
  }
  using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator;
  using const_all_num_lists_range =
      llvm::iterator_range<const_all_num_lists_iterator>;
  const_all_num_lists_range all_num_lists() const {
    auto A = getDeclNumListsRef();
    return const_all_num_lists_range(A.begin(), A.end());
  }
  using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator;
  using const_all_lists_sizes_range =
      llvm::iterator_range<const_all_lists_sizes_iterator>;
  const_all_lists_sizes_range all_lists_sizes() const {
    auto A = getComponentListSizesRef();
    return const_all_lists_sizes_range(A.begin(), A.end());
  }
  using const_all_components_iterator = ArrayRef<MappableComponent>::iterator;
  using const_all_components_range =
      llvm::iterator_range<const_all_components_iterator>;
  const_all_components_range all_components() const {
    auto A = getComponentsRef();
    return const_all_components_range(A.begin(), A.end());
  }
  using mapperlist_iterator = MutableArrayRef<Expr *>::iterator;
  using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator;
  using mapperlist_range = llvm::iterator_range<mapperlist_iterator>;
  using mapperlist_const_range =
      llvm::iterator_range<mapperlist_const_iterator>;
  mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); }
  mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); }
  mapperlist_const_iterator mapperlist_begin() const {
    return getUDMapperRefs().begin();
  }
  mapperlist_const_iterator mapperlist_end() const {
    return getUDMapperRefs().end();
  }
  mapperlist_range mapperlists() {
    return mapperlist_range(mapperlist_begin(), mapperlist_end());
  }
  mapperlist_const_range mapperlists() const {
    return mapperlist_const_range(mapperlist_begin(), mapperlist_end());
  }
};
/// This represents clause 'map' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target map(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>,
                           private llvm::TrailingObjects<
                               OMPMapClause, Expr *, ValueDecl *, unsigned,
                               OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;
  // Trailing-object storage layout, in declaration order of the
  // TrailingObjects template arguments:
  //   Expr *            : varlist_size() listed expressions followed by
  //                       varlist_size() user-defined mapper references;
  //   ValueDecl *       : getUniqueDeclarationsNum() unique declarations;
  //   unsigned          : per-declaration list counts followed by the
  //                       cumulative component-list sizes;
  //   MappableComponent : all expression components (last array, so no
  //                       explicit count overload is needed below).
  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    // Declaration list counts plus cumulative component-list sizes share the
    // same trailing array of unsigned values.
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }
public:
  /// Number of allowed map-type-modifiers.
  static constexpr unsigned NumberOfModifiers =
      OMPC_MAP_MODIFIER_last - OMPC_MAP_MODIFIER_unknown - 1;
private:
  /// Map-type-modifiers for the 'map' clause.
  // NOTE(review): the brace initializer lists exactly three 'unknown' entries;
  // it has to stay in sync with NumberOfModifiers (the constructor asserts
  // that the incoming modifier count matches the array length).
  OpenMPMapModifierKind MapTypeModifiers[NumberOfModifiers] = {
      OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown,
      OMPC_MAP_MODIFIER_unknown};
  /// Location of map-type-modifiers for the 'map' clause.
  SourceLocation MapTypeModifiersLoc[NumberOfModifiers];
  /// Map type for the 'map' clause.
  OpenMPMapClauseKind MapType = OMPC_MAP_unknown;
  /// Is this an implicit map type or not.
  bool MapTypeIsImplicit = false;
  /// Location of the map type.
  SourceLocation MapLoc;
  /// Colon location.
  SourceLocation ColonLoc;
  /// Build a clause for \a NumVars listed expressions, \a
  /// NumUniqueDeclarations declarations, \a NumComponentLists total component
  /// lists, and \a NumComponents total expression components.
  ///
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Locations of map-type-modifiers.
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param MapType Map type.
  /// \param MapTypeIsImplicit Map type is inferred implicitly.
  /// \param MapLoc Location of the map type.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers,
                        ArrayRef<SourceLocation> MapModifiersLoc,
                        NestedNameSpecifierLoc MapperQualifierLoc,
                        DeclarationNameInfo MapperIdInfo,
                        OpenMPMapClauseKind MapType, bool MapTypeIsImplicit,
                        SourceLocation MapLoc, const OMPVarListLocTy &Locs,
                        const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo),
        MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) {
    assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() &&
           "Unexpected number of map type modifiers.");
    llvm::copy(MapModifiers, std::begin(MapTypeModifiers));
    assert(llvm::array_lengthof(MapTypeModifiersLoc) ==
               MapModifiersLoc.size() &&
           "Unexpected number of map type modifier locations.");
    llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc));
  }
  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_map, OMPVarListLocTy(), Sizes) {}
  /// Set map-type-modifier for the clause.
  ///
  /// \param I index for map-type-modifier.
  /// \param T map-type-modifier for the clause.
  void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) {
    assert(I < NumberOfModifiers &&
           "Unexpected index to store map type modifier, exceeds array size.");
    MapTypeModifiers[I] = T;
  }
  /// Set location for the map-type-modifier.
  ///
  /// \param I index for map-type-modifier location.
  /// \param TLoc map-type-modifier location.
  void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) {
    assert(I < NumberOfModifiers &&
           "Index to store map type modifier location exceeds array size.");
    MapTypeModifiersLoc[I] = TLoc;
  }
  /// Set type for the clause.
  ///
  /// \param T Type for the clause.
  void setMapType(OpenMPMapClauseKind T) { MapType = T; }
  /// Set type location.
  ///
  /// \param TLoc Type location.
  void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; }
  /// Set colon location.
  void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
public:
  /// Creates clause with a list of variables \a VL.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param MapModifiers Map-type-modifiers.
  /// \param MapModifiersLoc Location of map-type-modifiers.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  /// \param Type Map type.
  /// \param TypeIsImplicit Map type is inferred implicitly.
  /// \param TypeLoc Location of the map type.
  static OMPMapClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists,
         ArrayRef<Expr *> UDMapperRefs,
         ArrayRef<OpenMPMapModifierKind> MapModifiers,
         ArrayRef<SourceLocation> MapModifiersLoc,
         NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId,
         OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc);
  /// Creates an empty clause with the place for \a NumVars original
  /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists
  /// lists, and \a NumComponents expression components.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPMapClause *CreateEmpty(const ASTContext &C,
                                   const OMPMappableExprListSizeTy &Sizes);
  /// Fetches mapping kind for the clause.
  OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; }
  /// Is this an implicit map type?
  /// We have to capture 'IsMapTypeImplicit' from the parser for more
  /// informative error messages. It helps distinguish map(r) from
  /// map(tofrom: r), which is important to print more helpful error
  /// messages for some target directives.
  bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; }
  /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers.
  ///
  /// \param Cnt index for map-type-modifier.
  OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier exceeds the total number of modifiers.");
    return MapTypeModifiers[Cnt];
  }
  /// Fetches the map-type-modifier location at 'Cnt' index of array of
  /// modifiers' locations.
  ///
  /// \param Cnt index for map-type-modifier location.
  SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY {
    assert(Cnt < NumberOfModifiers &&
           "Requested modifier location exceeds total number of modifiers.");
    return MapTypeModifiersLoc[Cnt];
  }
  /// Fetches ArrayRef of map-type-modifiers.
  ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiers);
  }
  /// Fetches ArrayRef of location of map-type-modifiers.
  ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY {
    return llvm::makeArrayRef(MapTypeModifiersLoc);
  }
  /// Fetches location of clause mapping kind.
  SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; }
  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }
  // The clause's children are the listed variable expressions; they are
  // stored as Expr * in the trailing storage and reinterpreted as Stmt **
  // to satisfy the child_range interface.
  child_range children() {
    return child_range(
        reinterpret_cast<Stmt **>(varlist_begin()),
        reinterpret_cast<Stmt **>(varlist_end()));
  }
  const_child_range children() const {
    auto Children = const_cast<OMPMapClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }
  // A 'map' clause reports no used expressions: both ranges are empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_map;
  }
};
/// Representation of the OpenMP 'num_teams' clause.
///
/// \code
/// #pragma omp teams num_teams(n)
/// \endcode
/// Here the '#pragma omp teams' directive carries a 'num_teams' clause whose
/// single argument is the expression 'n'.
class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of the opening parenthesis.
  SourceLocation LParenLoc;
  /// Expression giving the requested number of teams.
  Stmt *NumTeams = nullptr;
  /// Replace the stored num_teams expression.
  ///
  /// \param E New num_teams expression.
  void setNumTeams(Expr *E) { NumTeams = E; }
public:
  /// Construct a 'num_teams' clause.
  ///
  /// \param E Expression carried by the clause.
  /// \param HelperE Helper expression tied to this clause.
  /// \param CaptureRegion Innermost OpenMP region in which the expressions of
  /// this clause have to be captured.
  /// \param StartLoc Location where the clause begins.
  /// \param LParenLoc Location of the opening parenthesis.
  /// \param EndLoc Location where the clause ends.
  OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion,
                    SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this),
        LParenLoc(LParenLoc), NumTeams(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Construct an empty 'num_teams' clause.
  OMPNumTeamsClause()
      : OMPClause(OMPC_num_teams, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Record the location of the opening parenthesis.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Fetch the location of the opening parenthesis.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Fetch the num_teams expression.
  Expr *getNumTeams() { return cast<Expr>(NumTeams); }
  /// Fetch the num_teams expression.
  Expr *getNumTeams() const { return cast<Expr>(NumTeams); }
  // The single child is the num_teams expression.
  child_range children() { return child_range(&NumTeams, &NumTeams + 1); }
  const_child_range children() const {
    return const_child_range(&NumTeams, &NumTeams + 1);
  }
  // No used expressions are exposed: both ranges are empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_teams;
  }
};
/// Representation of the OpenMP 'thread_limit' clause.
///
/// \code
/// #pragma omp teams thread_limit(n)
/// \endcode
/// Here the '#pragma omp teams' directive carries a 'thread_limit' clause
/// whose single argument is the expression 'n'.
class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;
  /// Location of the opening parenthesis.
  SourceLocation LParenLoc;
  /// Expression giving the requested thread limit.
  Stmt *ThreadLimit = nullptr;
  /// Replace the stored thread_limit expression.
  ///
  /// \param E New thread_limit expression.
  void setThreadLimit(Expr *E) { ThreadLimit = E; }
public:
  /// Construct a 'thread_limit' clause.
  ///
  /// \param E Expression carried by the clause.
  /// \param HelperE Helper expression tied to this clause.
  /// \param CaptureRegion Innermost OpenMP region in which the expressions of
  /// this clause have to be captured.
  /// \param StartLoc Location where the clause begins.
  /// \param LParenLoc Location of the opening parenthesis.
  /// \param EndLoc Location where the clause ends.
  OMPThreadLimitClause(Expr *E, Stmt *HelperE,
                       OpenMPDirectiveKind CaptureRegion,
                       SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc)
      : OMPClause(OMPC_thread_limit, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }
  /// Construct an empty 'thread_limit' clause.
  OMPThreadLimitClause()
      : OMPClause(OMPC_thread_limit, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}
  /// Record the location of the opening parenthesis.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Fetch the location of the opening parenthesis.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Fetch the thread_limit expression.
  Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); }
  /// Fetch the thread_limit expression.
  Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); }
  // The single child is the thread_limit expression.
  child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); }
  const_child_range children() const {
    return const_child_range(&ThreadLimit, &ThreadLimit + 1);
  }
  // No used expressions are exposed: both ranges are empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_thread_limit;
  }
};
/// Representation of the OpenMP 'priority' clause.
///
/// \code
/// #pragma omp task priority(n)
/// \endcode
/// Here the '#pragma omp task' directive carries a 'priority' clause whose
/// single argument is the expression 'n'.
class OMPPriorityClause : public OMPClause {
  friend class OMPClauseReader;
  /// Location of the opening parenthesis.
  SourceLocation LParenLoc;
  /// Expression giving the task priority.
  Stmt *Priority = nullptr;
  /// Replace the stored priority expression.
  ///
  /// \param E New priority expression.
  void setPriority(Expr *E) { Priority = E; }
public:
  /// Construct a 'priority' clause.
  ///
  /// \param E Expression carried by the clause.
  /// \param StartLoc Location where the clause begins.
  /// \param LParenLoc Location of the opening parenthesis.
  /// \param EndLoc Location where the clause ends.
  OMPPriorityClause(Expr *E, SourceLocation StartLoc, SourceLocation LParenLoc,
                    SourceLocation EndLoc)
      : OMPClause(OMPC_priority, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Priority(E) {}
  /// Construct an empty 'priority' clause.
  OMPPriorityClause()
      : OMPClause(OMPC_priority, SourceLocation(), SourceLocation()) {}
  /// Record the location of the opening parenthesis.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
  /// Fetch the location of the opening parenthesis.
  SourceLocation getLParenLoc() const { return LParenLoc; }
  /// Fetch the priority expression.
  Expr *getPriority() { return cast<Expr>(Priority); }
  /// Fetch the priority expression.
  Expr *getPriority() const { return cast<Expr>(Priority); }
  // The single child is the priority expression.
  child_range children() { return child_range(&Priority, &Priority + 1); }
  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }
  // No used expressions are exposed: both ranges are empty.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_priority;
  }
};
/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression of the clause.
  Stmt *Grainsize = nullptr;

  /// Set the grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                     SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_grainsize, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Grainsize(Size) {}

  /// Build an empty clause with invalid source locations.
  explicit OMPGrainsizeClause()
      : OMPClause(OMPC_grainsize, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression, or null for an empty clause.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  // The single stored expression is the clause's only child.
  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_grainsize;
  }
};
/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
/// There is no expression or argument associated with this clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause with invalid source locations.
  OMPNogroupClause()
      : OMPClause(OMPC_nogroup, SourceLocation(), SourceLocation()) {}

  // The clause stores no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_nogroup;
  }
};
/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number-of-tasks expression of the clause.
  Stmt *NumTasks = nullptr;

  /// Set the num_tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(OMPC_num_tasks, StartLoc, EndLoc), LParenLoc(LParenLoc),
        NumTasks(Size) {}

  /// Build an empty clause with invalid source locations.
  explicit OMPNumTasksClause()
      : OMPClause(OMPC_num_tasks, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the num_tasks expression, or null for an empty clause.
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  // The single stored expression is the clause's only child.
  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_num_tasks;
  }
};
/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(OMPC_hint, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Hint(Hint) {}

  /// Build an empty clause with invalid source locations.
  OMPHintClause() : OMPClause(OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression, or null for an empty clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  // The single stored expression is the clause's only child.
  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_hint;
  }
};
/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
  ///
  /// \param E Chunk size.
  void setChunkSize(Expr *E) { ChunkSize = E; }

public:
  /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk
  /// size expression \a ChunkSize.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param CommaLoc Location of ','.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind DistSchedule kind.
  /// \param ChunkSize Chunk size.
  /// \param HelperChunkSize Helper chunk size for combined directives.
  OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                        SourceLocation KLoc, SourceLocation CommaLoc,
                        SourceLocation EndLoc,
                        OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
                        Stmt *HelperChunkSize)
      : OMPClause(OMPC_dist_schedule, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind),
        KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) {
    // The helper chunk-size expression is stored as a pre-init statement.
    setPreInitStmt(HelperChunkSize);
  }

  /// Build an empty clause with invalid source locations.
  explicit OMPDistScheduleClause()
      : OMPClause(OMPC_dist_schedule, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Get kind of the clause.
  OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; }

  /// Get location of '('.
  /// (const added for consistency with the other clause classes; a
  /// backward-compatible change for all callers.)
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDistScheduleKindLoc() const { return KindLoc; }

  /// Get location of ','.
  SourceLocation getCommaLoc() const { return CommaLoc; }

  /// Get chunk size.
  Expr *getChunkSize() { return ChunkSize; }

  /// Get chunk size.
  const Expr *getChunkSize() const { return ChunkSize; }

  // ChunkSize is stored as Expr*, so reinterpret it as a one-element
  // Stmt* range for the generic child iteration interface.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(&ChunkSize),
                       reinterpret_cast<Stmt **>(&ChunkSize) + 1);
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDistScheduleClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_dist_schedule;
  }
};
/// This represents 'defaultmap' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp target defaultmap(tofrom: scalar)
/// \endcode
/// In this example directive '#pragma omp target' has 'defaultmap' clause
/// of kind 'scalar' with modifier 'tofrom'.
class OMPDefaultmapClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Modifiers for 'defaultmap' clause.
  OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown;

  /// Locations of modifiers.
  SourceLocation ModifierLoc;

  /// A kind of the 'defaultmap' clause.
  OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown;

  /// Start location of the defaultmap kind in source code.
  SourceLocation KindLoc;

  /// Set defaultmap kind.
  ///
  /// \param K Defaultmap kind.
  void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; }

  /// Set the defaultmap modifier.
  ///
  /// \param M Defaultmap modifier.
  void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) {
    Modifier = M;
  }

  /// Set location of the defaultmap modifier.
  void setDefaultmapModifierLoc(SourceLocation Loc) {
    ModifierLoc = Loc;
  }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set defaultmap kind start location.
  ///
  /// \param KLoc Defaultmap kind location.
  void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

public:
  /// Build 'defaultmap' clause with defaultmap kind \a Kind
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param KLoc Starting location of the argument.
  /// \param EndLoc Ending location of the clause.
  /// \param Kind Defaultmap kind.
  /// \param M The modifier applied to 'defaultmap' clause.
  /// \param MLoc Location of the modifier
  OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                      SourceLocation MLoc, SourceLocation KLoc,
                      SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind,
                      OpenMPDefaultmapClauseModifier M)
      : OMPClause(OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc),
        Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {}

  /// Build an empty clause with invalid source locations.
  explicit OMPDefaultmapClause()
      : OMPClause(OMPC_defaultmap, SourceLocation(), SourceLocation()) {}

  /// Get kind of the clause.
  OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; }

  /// Get the modifier of the clause.
  OpenMPDefaultmapClauseModifier getDefaultmapModifier() const {
    return Modifier;
  }

  /// Get location of '('.
  /// (const added for consistency with the other clause classes; a
  /// backward-compatible change for all callers.)
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Get kind location.
  SourceLocation getDefaultmapKindLoc() const { return KindLoc; }

  /// Get the modifier location.
  SourceLocation getDefaultmapModifierLoc() const {
    return ModifierLoc;
  }

  // The clause stores no expressions, so all child ranges are empty.
  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_defaultmap;
  }
};
/// This represents clause 'to' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update to(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'to'
/// with the variables 'a' and 'b'.
class OMPToClause final : public OMPMappableExprListClause<OMPToClause>,
                          private llvm::TrailingObjects<
                              OMPToClause, Expr *, ValueDecl *, unsigned,
                              OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(NestedNameSpecifierLoc MapperQualifierLoc,
                       DeclarationNameInfo MapperIdInfo,
                       const OMPVarListLocTy &Locs,
                       const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_to, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                             ArrayRef<Expr *> Vars,
                             ArrayRef<ValueDecl *> Declarations,
                             MappableExprComponentListsRef ComponentLists,
                             ArrayRef<Expr *> UDMapperRefs,
                             NestedNameSpecifierLoc UDMQualifierLoc,
                             DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPToClause *CreateEmpty(const ASTContext &C,
                                  const OMPMappableExprListSizeTy &Sizes);

  // The children are the original list expressions stored at the start of
  // the trailing storage; reinterpret the Expr* array as Stmt*.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPToClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_to;
  }
};
/// This represents clause 'from' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target update from(a,b)
/// \endcode
/// In this example directive '#pragma omp target update' has clause 'from'
/// with the variables 'a' and 'b'.
class OMPFromClause final
    : public OMPMappableExprListClause<OMPFromClause>,
      private llvm::TrailingObjects<
          OMPFromClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param MapperQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperIdInfo The identifier of associated user-defined mapper.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(NestedNameSpecifierLoc MapperQualifierLoc,
                         DeclarationNameInfo MapperIdInfo,
                         const OMPVarListLocTy &Locs,
                         const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, Locs, Sizes, &MapperQualifierLoc,
                                  &MapperIdInfo) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_from, OMPVarListLocTy(), Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // There are varlist_size() of expressions, and varlist_size() of
    // user-defined mappers.
    return 2 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  /// \param UDMapperRefs References to user-defined mappers associated with
  /// expressions used in the clause.
  /// \param UDMQualifierLoc C++ nested name specifier for the associated
  /// user-defined mapper.
  /// \param MapperId The identifier of associated user-defined mapper.
  static OMPFromClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs,
                               ArrayRef<Expr *> Vars,
                               ArrayRef<ValueDecl *> Declarations,
                               MappableExprComponentListsRef ComponentLists,
                               ArrayRef<Expr *> UDMapperRefs,
                               NestedNameSpecifierLoc UDMQualifierLoc,
                               DeclarationNameInfo MapperId);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPFromClause *CreateEmpty(const ASTContext &C,
                                    const OMPMappableExprListSizeTy &Sizes);

  // The children are the original list expressions stored at the start of
  // the trailing storage; reinterpret the Expr* array as Stmt*.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFromClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_from;
  }
};
/// This represents clause 'use_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target data use_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target data' has clause
/// 'use_device_ptr' with the variables 'a' and 'b'.
class OMPUseDevicePtrClause final
    : public OMPMappableExprListClause<OMPUseDevicePtrClause>,
      private llvm::TrailingObjects<
          OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned,
          OMPClauseMappableExprCommon::MappableComponent> {
  friend class OMPClauseReader;
  friend OMPMappableExprListClause;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a NumVars.
  ///
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs,
                                 const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, Locs, Sizes) {}

  /// Build an empty clause.
  ///
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
      : OMPMappableExprListClause(OMPC_use_device_ptr, OMPVarListLocTy(),
                                  Sizes) {}

  /// Define the sizes of each trailing object array except the last one. This
  /// is required for TrailingObjects to work properly.
  size_t numTrailingObjects(OverloadToken<Expr *>) const {
    // Three consecutive arrays of varlist_size() expressions each: the
    // original list expressions, the private copies (see getPrivateCopies),
    // and the initializer expressions (see getInits).
    return 3 * varlist_size();
  }
  size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
    return getUniqueDeclarationsNum();
  }
  size_t numTrailingObjects(OverloadToken<unsigned>) const {
    return getUniqueDeclarationsNum() + getTotalComponentListNum();
  }

  /// Sets the list of references to private copies with initializers for new
  /// private variables.
  /// \param VL List of references.
  void setPrivateCopies(ArrayRef<Expr *> VL);

  /// Gets the list of references to private copies with initializers for new
  /// private variables.
  // The private copies are stored immediately after the original list
  // expressions (i.e. starting at varlist_end()).
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Sets the list of references to initializer variables for new private
  /// variables.
  /// \param VL List of references.
  void setInits(ArrayRef<Expr *> VL);

  /// Gets the list of references to initializer variables for new private
  /// variables.
  // The initializers follow the private copies in the trailing storage.
  MutableArrayRef<Expr *> getInits() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getInits() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

public:
  /// Creates clause with a list of variables \a Vars.
  ///
  /// \param C AST context.
  /// \param Locs Locations needed to build a mappable clause. It includes 1)
  /// StartLoc: starting location of the clause (the clause keyword); 2)
  /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
  /// \param Vars The original expression used in the clause.
  /// \param PrivateVars Expressions referring to private copies.
  /// \param Inits Expressions referring to private copy initializers.
  /// \param Declarations Declarations used in the clause.
  /// \param ComponentLists Component lists used in the clause.
  static OMPUseDevicePtrClause *
  Create(const ASTContext &C, const OMPVarListLocTy &Locs,
         ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
         ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
         MappableExprComponentListsRef ComponentLists);

  /// Creates an empty clause with the place for \a NumVars variables.
  ///
  /// \param C AST context.
  /// \param Sizes All required sizes to build a mappable clause. It includes 1)
  /// NumVars: number of expressions listed in this clause; 2)
  /// NumUniqueDeclarations: number of unique base declarations in this clause;
  /// 3) NumComponentLists: number of component lists in this clause; and 4)
  /// NumComponents: total number of expression components in the clause.
  static OMPUseDevicePtrClause *
  CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);

  using private_copies_iterator = MutableArrayRef<Expr *>::iterator;
  using private_copies_const_iterator = ArrayRef<const Expr *>::iterator;
  using private_copies_range = llvm::iterator_range<private_copies_iterator>;
  using private_copies_const_range =
      llvm::iterator_range<private_copies_const_iterator>;

  /// Iterator range over the private-copy expressions.
  private_copies_range private_copies() {
    return private_copies_range(getPrivateCopies().begin(),
                                getPrivateCopies().end());
  }

  private_copies_const_range private_copies() const {
    return private_copies_const_range(getPrivateCopies().begin(),
                                      getPrivateCopies().end());
  }

  using inits_iterator = MutableArrayRef<Expr *>::iterator;
  using inits_const_iterator = ArrayRef<const Expr *>::iterator;
  using inits_range = llvm::iterator_range<inits_iterator>;
  using inits_const_range = llvm::iterator_range<inits_const_iterator>;

  /// Iterator range over the initializer expressions.
  inits_range inits() {
    return inits_range(getInits().begin(), getInits().end());
  }

  inits_const_range inits() const {
    return inits_const_range(getInits().begin(), getInits().end());
  }

  // The children are the original list expressions stored at the start of
  // the trailing storage; reinterpret the Expr* array as Stmt*.
  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  // No used children are reported: empty iterator ranges.
  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == OMPC_use_device_ptr;
  }
};
/// This represents clause 'is_device_ptr' in the '#pragma omp ...'
/// directives.
///
/// \code
/// #pragma omp target is_device_ptr(a,b)
/// \endcode
/// In this example directive '#pragma omp target' has clause
/// 'is_device_ptr' with the variables 'a' and 'b'.
class OMPIsDevicePtrClause final
: public OMPMappableExprListClause<OMPIsDevicePtrClause>,
private llvm::TrailingObjects<
OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned,
OMPClauseMappableExprCommon::MappableComponent> {
friend class OMPClauseReader;
friend OMPMappableExprListClause;
friend OMPVarListClause;
friend TrailingObjects;
/// Build clause with number of variables \a NumVars.
///
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs,
const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, Locs, Sizes) {}
/// Build an empty clause.
///
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes)
: OMPMappableExprListClause(OMPC_is_device_ptr, OMPVarListLocTy(),
Sizes) {}
/// Define the sizes of each trailing object array except the last one. This
/// is required for TrailingObjects to work properly.
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return varlist_size();
}
size_t numTrailingObjects(OverloadToken<ValueDecl *>) const {
return getUniqueDeclarationsNum();
}
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return getUniqueDeclarationsNum() + getTotalComponentListNum();
}
public:
/// Creates clause with a list of variables \a Vars.
///
/// \param C AST context.
/// \param Locs Locations needed to build a mappable clause. It includes 1)
/// StartLoc: starting location of the clause (the clause keyword); 2)
/// LParenLoc: location of '('; 3) EndLoc: ending location of the clause.
/// \param Vars The original expression used in the clause.
/// \param Declarations Declarations used in the clause.
/// \param ComponentLists Component lists used in the clause.
static OMPIsDevicePtrClause *
Create(const ASTContext &C, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations,
MappableExprComponentListsRef ComponentLists);
/// Creates an empty clause with the place for \a NumVars variables.
///
/// \param C AST context.
/// \param Sizes All required sizes to build a mappable clause. It includes 1)
/// NumVars: number of expressions listed in this clause; 2)
/// NumUniqueDeclarations: number of unique base declarations in this clause;
/// 3) NumComponentLists: number of component lists in this clause; and 4)
/// NumComponents: total number of expression components in the clause.
static OMPIsDevicePtrClause *
CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes);
/// Child statements: the variable-list expressions, viewed as Stmt pointers
/// for generic AST traversal.
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
reinterpret_cast<Stmt **>(varlist_end()));
}
/// Const overload; const_cast is safe since the range is only read.
const_child_range children() const {
auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
/// This clause contributes no "used" child expressions.
child_range used_children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range used_children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
/// LLVM-style RTTI: true iff \p T is an 'is_device_ptr' clause.
static bool classof(const OMPClause *T) {
return T->getClauseKind() == OMPC_is_device_ptr;
}
};
/// This class implements a simple visitor for OMPClause
/// subclasses.
///
/// The Ptr template-template parameter selects the pointer flavor the visitor
/// operates on: std::add_pointer yields `Class *`, const_ptr yields
/// `const Class *`.  Visit##Class overloads are stamped out for every clause
/// listed in OpenMPKinds.def (X-macro expansion).
template<class ImplClass, template <typename> class Ptr, typename RetTy>
class OMPClauseVisitorBase {
public:
// PTR(CLASS) resolves Ptr<CLASS>::type, i.e. the (possibly const) pointer.
#define PTR(CLASS) typename Ptr<CLASS>::type
#define DISPATCH(CLASS) \
return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S))
// Default per-clause visitors: forward to the derived class via DISPATCH so
// ImplClass may override any subset of them (CRTP static dispatch).
#define OPENMP_CLAUSE(Name, Class) \
RetTy Visit ## Class (PTR(Class) S) { DISPATCH(Class); }
#include "clang/Basic/OpenMPKinds.def"
RetTy Visit(PTR(OMPClause) S) {
// Top switch clause: visit each OMPClause.
switch (S->getClauseKind()) {
default: llvm_unreachable("Unknown clause kind!");
#define OPENMP_CLAUSE(Name, Class) \
case OMPC_ ## Name : return Visit ## Class(static_cast<PTR(Class)>(S));
#include "clang/Basic/OpenMPKinds.def"
}
}
// Base case, ignore it. :)
RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); }
#undef PTR
#undef DISPATCH
};
// Alias for the add_pointer trait over `const T`; the visitor's PTR(CLASS)
// macro applies ::type to it, producing `const CLASS *` for the const visitor.
template <typename T>
using const_ptr = typename std::add_pointer<typename std::add_const<T>::type>;
/// Visitor over mutable clauses: Visit methods receive `Class *`.
template<class ImplClass, typename RetTy = void>
class OMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, std::add_pointer, RetTy> {};
/// Visitor over immutable clauses: Visit methods receive `const Class *`.
template<class ImplClass, typename RetTy = void>
class ConstOMPClauseVisitor :
public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {};
/// Pretty-prints OpenMP clauses to a raw_ostream; one Visit##Class overload
/// per clause kind is declared via the OpenMPKinds.def X-macro.
class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> {
raw_ostream &OS;
const PrintingPolicy &Policy;
/// Process clauses with list of variables.
template <typename T> void VisitOMPClauseList(T *Node, char StartSym);
public:
OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy)
: OS(OS), Policy(Policy) {}
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *S);
#include "clang/Basic/OpenMPKinds.def"
};
} // namespace clang
#endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
|
pyfr_gemm_cm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <mkl.h>
#include <libxsmm.h>
/* Elapsed wall-clock time, in seconds, between two gettimeofday() samples. */
static double sec(struct timeval start, struct timeval end) {
  const double t_begin = (double)start.tv_sec * 1000000.0 + (double)start.tv_usec;
  const double t_end   = (double)end.tv_sec   * 1000000.0 + (double)end.tv_usec;
  return (t_end - t_begin) / 1.0e6;
}
/*
 * Benchmark driver: compares MKL dgemm against a libxsmm JIT'ed kernel for a
 * column-major C += A*B with sizes M x N x K.  The libxsmm kernel is
 * dispatched once for an M x nblock x K panel and applied over the N/nblock
 * column panels in parallel.
 *
 * Usage: ./a.out M N K reps
 * Returns 0 on success; exits nonzero on bad arguments or dispatch failure.
 *
 * Fixes vs. the original:
 *  - gettimeofday(&l_end, ...) was called inside the libxsmm reps loop,
 *    adding syscall overhead to the timed region; it is now sampled once
 *    after the loop, matching the MKL measurement.
 *  - corrected the usage and "divisible" messages.
 *  - kernel dispatch is checked for NULL before use.
 *  - the _mm_malloc'd buffers are released and main returns 0.
 */
int main(int argc, char *argv[])
{
  int n, m, k;
  int lda, ldb, ldc;
  double* a;
  double* b;
  double* c1;                       /* reference result (MKL)   */
  double* c2;                       /* result under test (JIT)  */
  struct timeval l_start, l_end;
  double l_total = 0.0;
  int reps, i, j;
  const int nblock = 16;            /* JIT panel width in columns */
  double alpha = 1.0, beta = 1.0;
  char transa = 'N', transb = 'N';
  libxsmm_gemm_prefetch_type l_prefetch_op = LIBXSMM_PREFETCH_NONE;
  libxsmm_dmmfunction kernel = NULL;

  if (argc != 5) {
    fprintf(stderr, "Usage: %s M N K reps\n", argv[0]);
    exit(-1);
  }
  m = atoi(argv[1]);
  n = atoi(argv[2]);
  k = atoi(argv[3]);
  reps = atoi(argv[4]);

  /* this is col-major what you want to use for the sizes in question */
  lda = m;
  ldb = k;
  ldc = m;

  if (n % nblock != 0) {
    fprintf(stderr, "N needs to be divisible by %i\n", nblock);
    exit(-1);
  }

  /* 64-byte aligned buffers for vectorized kernels */
  a  = (double*)_mm_malloc(lda*k*sizeof(double), 64);
  b  = (double*)_mm_malloc(ldb*n*sizeof(double), 64);
  c1 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);
  c2 = (double*)_mm_malloc(ldc*n*sizeof(double), 64);

  #pragma omp parallel for
  for (i = 0; i < lda*k; i++) {
    a[i] = libxsmm_rng_f64();
  }
  #pragma omp parallel for
  for (i = 0; i < ldb*n; i++) {
    b[i] = libxsmm_rng_f64();
  }
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* JIT Kernel: M x nblock x K panel, leading dims default to m/k/m */
  kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op );
  if (kernel == NULL) {
    fprintf(stderr, "libxsmm_dmmdispatch failed\n");
    exit(-1);
  }

  /* warm up / init MKL, then reset C */
  dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  #pragma omp parallel for
  for (i = 0; i < ldc*n; i++) {
    c1[i] = 0;
    c2[i] = 0;
  }

  /* timed MKL runs */
  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
    dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
  }
  gettimeofday(&l_end, NULL);
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  /* timed libxsmm runs: one JIT call per nblock-wide column panel */
  gettimeofday(&l_start, NULL);
  for ( j = 0; j < reps; j++ ) {
    #pragma omp parallel for private(i)
    for ( i = 0; i < n; i+=nblock) {
      kernel( a, b+(ldb*i), c2+(ldc*i), NULL, NULL, NULL );
    }
  }
  gettimeofday(&l_end, NULL);   /* sample once, outside the reps loop */
  l_total = sec(l_start, l_end);
  fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
  fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
  fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );

  /* test result: max |c1 - c2| over all entries */
  double max_error = 0.0;
  for ( i = 0; i < ldc*n; i++) {
    if (max_error < fabs(c1[i] - c2[i])) {
      max_error = fabs(c1[i] - c2[i]);
    }
  }
  printf("max error: %f\n\n", max_error);

  _mm_free(a);
  _mm_free(b);
  _mm_free(c1);
  _mm_free(c2);
  return 0;
}
|
reduce.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_REDUCE_H_
#define SRC_SINGLENODE_REDUCE_H_
/// Fold every set entry of a dense segment into *result using the caller's
/// reduction callback op_fp(accumulator, element, out, state).  Entries whose
/// bit in `bitvector` is clear are skipped.  `res_set` is accepted for
/// interface symmetry with the mapreduce variant but is not consulted here.
template <typename T>
void reduce_dense_segment(T* value, int * bitvector, int nnz, T* result, bool* res_set,
void (*op_fp)(const T&, const T&, T*, void*), void* vsp) {
  for (int idx = 0; idx < nnz; idx++) {
    if (!get_bitvector(idx, bitvector))
      continue;
    op_fp(*result, value[idx], result, vsp);
  }
}
/// Map-then-reduce over the set entries of a dense segment.
///
/// Each OpenMP thread processes a contiguous slice of [0, nnz), applies
/// op_map to every set element and folds the mapped values into a per-thread
/// accumulator; the per-thread partials are then reduced serially into
/// *result.  Per-thread slots are spaced 16 elements apart — presumably
/// padding to keep each thread's accumulator on its own cache line (avoid
/// false sharing); TODO confirm.
///
/// \param value     dense element array (length >= nnz)
/// \param bitvector presence bits, queried via get_bitvector(i, bitvector)
/// \param nnz       number of slots to scan
/// \param result    in/out: folded with each thread's partial via op_fp
/// \param res_set   accepted but not consulted in this implementation
/// \param op_map    maps a VT element to a T value
/// \param op_fp     reduction: op_fp(acc, elem, out, state)
/// \param vsp       opaque user state passed through to both callbacks
template <typename VT, typename T>
void mapreduce_dense_segment(VT* value, int * bitvector, int nnz, T* result, bool* res_set,
void (*op_map)(VT*, T*, void*), void (*op_fp)(const T&, const T&, T*, void*), void* vsp) {
int nthreads = omp_get_max_threads();
T * local_reduced = new T[nthreads*16];
bool * firstSet = new bool[nthreads*16];
#pragma omp parallel for
for(int p = 0 ; p < nthreads ; p++)
{
// each thread owns slot p*16 and initializes it lazily on first hit
firstSet[p*16] = false;
int nnz_per_thread = (nnz + nthreads - 1) / nthreads;
int start = nnz_per_thread * p;
int end = nnz_per_thread * (p+1);
if(start > nnz) start = nnz;
if(end > nnz) end = nnz;
for(int i = start ; i < end ; i++)
{
if(get_bitvector(i, bitvector))
{
T temp_result2;
op_map(value + i, &temp_result2, vsp);
if(firstSet[p*16])
{
// copy out the accumulator first: op_fp's output aliases its input slot
T temp_result = local_reduced[p*16];
op_fp(temp_result, temp_result2, local_reduced + p*16, vsp);
}
else
{
// first mapped value seeds the thread-local accumulator
local_reduced[p*16] = temp_result2;
firstSet[p*16] = true;
}
}
}
}
// Reduce each thread's local result
for(int p = 0 ; p < nthreads ; p++)
{
if(firstSet[p*16])
{
//T temp_result = *result;
op_fp(*result, local_reduced[p*16], result, vsp);
}
}
delete [] local_reduced;
delete [] firstSet;
}
/// Convenience wrapper: reduce a DenseSegment by forwarding its value array,
/// bit vector, and capacity to reduce_dense_segment.
template <typename T>
void reduce_segment(const DenseSegment<T> * segment, T* res, bool* res_set,
void (*op_fp)(const T&, const T&, T*, void*), void* vsp) {
reduce_dense_segment(segment->properties->value, segment->properties->bit_vector, segment->capacity, res, res_set, op_fp, vsp);
}
/// Convenience wrapper: ensure the segment's storage exists (alloc +
/// initialize), then map-reduce its contents via mapreduce_dense_segment.
/// Note: unlike reduce_segment, this takes a non-const segment because
/// alloc()/initialize() may mutate it.
template <typename VT, typename T>
void mapreduce_segment(DenseSegment<VT> * segment, T* res, bool* res_set,
void (*op_map)(VT*, T*, void*), void (*op_fp)(const T&, const T&, T*, void*), void* vsp) {
segment->alloc();
segment->initialize();
mapreduce_dense_segment(segment->properties->value, segment->properties->bit_vector, segment->capacity, res, res_set, op_map, op_fp, vsp);
}
#endif // SRC_SINGLENODE_REDUCE_H_
|
GB_unaryop__ainv_fp32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_int64
// op(A') function: GB_tran__ainv_fp32_int64
// C type: float
// A type: int64_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = (float) (-Ax [p]) for all anz entries,
// parallelized statically over nthreads.  Compiled out (GrB_NO_VALUE) when
// GB_DISABLE selects the generic fallback instead.
GrB_Info GB_unop__ainv_fp32_int64
(
float *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: float x = (float) Ax [p] ; Cx [p] = -x ;
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// The whole algorithm lives in GB_unaryop_transpose.c, specialized by the
// GB_* macros defined above; this wrapper only selects phase 2 and handles
// the GB_DISABLE opt-out.
GrB_Info GB_tran__ainv_fp32_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
host_targ.c | #include <stdio.h>
#include <omp.h>
int arr[100];
int nt =12;
int iid, gid, gdd, gdn;
/*
 * Smoke test for OpenMP target offload: queries device-identity APIs from
 * inside a target region, then fills a 100-element array on the device and
 * verifies it on the host.  Returns 0 on success, 1 on verification failure.
 */
int main()
{
  /* how many offload devices does the host runtime see? */
  fprintf(stderr, "Omp host get_num_devices %d\n", omp_get_num_devices());

  /* query the device-identity APIs inside a target region and map the
   * answers back into the host globals */
  #pragma omp target map(tofrom: iid, gid, gdd, gdn)
  {
    iid = omp_is_initial_device();
    gid = omp_get_initial_device();
    gdd = omp_get_default_device();
    gdn = omp_get_device_num();
  }
  fprintf(stderr, "Omp target omp_is_initial_device %d\n", iid);
  fprintf(stderr, "Omp target omp_get_initial_device %d\n", gid);
  fprintf(stderr, "Omp target omp_get_default_device %d\n", gdd);
  fprintf(stderr, "Omp target omp_get_device_num %d\n", gdn);

  /* fill arr[idx] = idx on the device */
  #pragma omp target teams distribute parallel for num_threads(nt)
  for (int idx = 0; idx < 100; idx++)
    arr[idx] = idx;

  /* verify on the host */
  int errors = 0;
  for (int idx = 0; idx < 100; idx++)
    if (arr[idx] != idx)
      errors++;

  if (errors) {
    fprintf(stderr, "Failed\nErrors: %d\n", errors);
    return 1;
  }
  fprintf(stderr, "Success\n");
  return 0;
}
|
GB_unop__ceil_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ceil_fp64_fp64
// op(A') function: GB_unop_tran__ceil_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = ceil (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ceil (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = ceil (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CEIL || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = ceil (Ax [p]).  Two code paths: full/dense
// (Ab == NULL, every entry present) and bitmap (Ab [p] flags presence).  The
// temporaries from the generator (aij, z) are folded into a single
// expression; the arithmetic is identical.
GrB_Info GB_unop_apply__ceil_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/dense case: every entry is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
Cx [p] = ceil (Ax [p]) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (Ab [p])
{
Cx [p] = ceil (Ax [p]) ;
}
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Thin wrapper: the transpose algorithm is supplied by GB_unop_transpose.c,
// specialized via the GB_* macros defined above; GB_DISABLE opts out to the
// generic kernel.
GrB_Info GB_unop_tran__ceil_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__cos_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__cos_fc32_fc32)
// op(A') function: GB (_unop_tran__cos_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ccosf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ccosf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ccosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_COS || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel: Cx [p] = ccosf (Ax [p]) (single-precision complex
// cosine).  Full/dense path when Ab == NULL; bitmap path skips entries whose
// Ab [p] flag is clear.
GrB_Info GB (_unop_apply__cos_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full/dense case: every entry present
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccosf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ccosf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Thin wrapper around GB_unop_transpose.c, specialized by the GB_* macros
// defined above; GB_DISABLE opts out to the generic kernel.
GrB_Info GB (_unop_tran__cos_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__iseq_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int32)
// A*D function (colscale): GB (_AxD__iseq_int32)
// D*A function (rowscale): GB (_DxB__iseq_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int32)
// C=scalar+B GB (_bind1st__iseq_int32)
// C=scalar+B' GB (_bind1st_tran__iseq_int32)
// C=A+scalar GB (_bind2nd__iseq_int32)
// C=A'+scalar GB (_bind2nd_tran__iseq_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT32 || GxB_NO_ISEQ_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Compiled out for ISEQ: the generator emits this dense-accum variant only
// for the ops listed above, hence the placeholder name "(none)".
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Body supplied by GB_dense_ewise3_noaccum_template.c, specialized by the
// GB_* macros above (GB_BINOP: z = (x == y) for ISEQ on int32).
void GB (_Cdense_ewise3_noaccum__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Body supplied by GB_dense_subassign_23_template.c; GB_DISABLE opts out to
// the generic kernel.
GrB_Info GB (_Cdense_accumB__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Body supplied by GB_dense_subassign_22_template.c; the scalar operand is
// unpacked from the untyped p_bwork into an int32_t first.  GB_DISABLE opts
// out to the generic kernel.  Fix vs. generated code: the original contained
// two `return (GrB_SUCCESS)` statements, the second unreachable; only the
// final one is kept.
GrB_Info GB (_Cdense_accumb__iseq_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Body supplied by GB_AxB_colscale_template.c; only the typed output pointer
// Cx is set up here.
GrB_Info GB (_AxD__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Body supplied by GB_AxB_rowscale_template.c; only the typed output pointer
// Cx is set up here.
GrB_Info GB (_DxB__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// Body supplied by GB_add_template.c.  The alpha/beta scalars are unpacked
// from their untyped inputs only for eWiseUnion (they fill in values for
// entries present in just one of A or B); the GB_WERK workspaces are declared
// here and released by GB_FREE_WORKSPACE after the template runs.
GrB_Info GB (_AaddB__iseq_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Body supplied by GB_emult_08_meta.c; GB_DISABLE opts out to the generic
// kernel.
GrB_Info GB (_AemultB_08__iseq_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Body supplied by GB_emult_02_template.c.  The GB_BINOP_FLIP branch is
// resolved at compile time: ISEQ is commutative (GB_BINOP_FLIP is 0 above),
// so the runtime flipxy flag needs no special handling here.
GrB_Info GB (_AemultB_02__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Body supplied by GB_emult_04_template.c; GB_DISABLE opts out to the
// generic kernel.
GrB_Info GB (_AemultB_04__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Body supplied by GB_bitmap_emult_template.c; GB_DISABLE opts out to the
// generic kernel.
GrB_Info GB (_AemultB_bitmap__iseq_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry p present in B: Cx [p] = (x == Bx [p]), with int32_t result.
GrB_Info GB (_bind1st__iseq_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B has no bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs (no restrict: may alias)
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t *Bx = (const int32_t *) Bx_input ;
    const int32_t xscalar = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only act on entries present in the bitmap of B
        if (GBB (Bb, p))
        {
            int32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (xscalar == bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry p present in A: Cx [p] = (Ax [p] == y), with int32_t result.
GrB_Info GB (_bind2nd__iseq_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A has no bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs/outputs (no restrict: may alias)
    int32_t *Cx = (int32_t *) Cx_output ;
    const int32_t *Ax = (const int32_t *) Ax_input ;
    const int32_t yscalar = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only act on entries present in the bitmap of A
        if (GBB (Ab, p))
        {
            int32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij == yscalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// Transpose A and apply z = (x == aij) with the scalar bound to the 1st
// input; the transpose loops are in the included GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__iseq_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent generated kernel
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// Transpose A and apply z = (aij == y) with the scalar bound to the 2nd
// input; the transpose loops are in the included GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__iseq_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
int main (int argc, char **argv) {

  int Nthreads = 20;
  omp_set_num_threads(Nthreads);

  // Seed the randomizer: clock() makes each run behave differently.
  // Use a fixed constant here instead for reproducible runs.
  srand((unsigned int) clock());

  // Storage for an ElGamal cryptosystem: modulus p, generator g,
  // public key h = g^x mod p, and secret key x.
  unsigned int p, g, h, x;

  // Read the desired key size in bits from the user.
  unsigned int n;
  printf("Enter a number of bits: "); fflush(stdout);
  int status = scanf("%u", &n);  // scanf returns int (items matched), not char

  // Validate the input: need 9..31 bits (8 is too small to encode chars,
  // 32 would overflow unsigned arithmetic).
  if ((status != 1) || (n < 9) || (n > 31)) {
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  // Set up an ElGamal cryptosystem.
  setupElGamal(n, &p, &g, &h, &x);

  int bufferSize = 1024;
  unsigned char *message = (unsigned char *) malloc(bufferSize * sizeof(unsigned char));

  // Populate the buffer with a message.
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  /* Q1.1 Finish this line */
  // Number of whole characters that fit in one element of Z_p.
  unsigned int charsPerInt = (n - 1) / 8;

  // Pad so the length is a multiple of charsPerInt.
  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);

  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints = Nchars / charsPerInt;

  // Storage for the message as elements of Z_p.
  unsigned int *Zmessage =
      (unsigned int *) malloc(Nints * sizeof(unsigned int));
  // Storage for the extra encryption coefficient.
  unsigned int *a =
      (unsigned int *) malloc(Nints * sizeof(unsigned int));

  // Cast the string into an unsigned int array.
  convertStringToZ(message, Nchars, Zmessage, Nints);

  // Encrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalEncrypt(Zmessage, a, Nints, p, g, h);
  // Opening '[' added so the output brackets balance with the "]" below.
  printf("The encrypted text is: [");
  for (unsigned int i = 0; i < Nints; i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  // Decrypt the Zmessage with the ElGamal cryptographic system.
  ElGamalDecrypt(Zmessage, a, Nints, p, x);
  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  // Suppose we don't know the secret key: brute-force it in parallel.
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);

  /* Q2.3 Parallelize this loop with OpenMP */
  double startTime = omp_get_wtime();
  int found = 0;
  // The original "#pragma omp shared(chow)" is not a valid OpenMP directive
  // (and put the final 'return' inside a would-be parallel region); a
  // parallel-for with a shared early-out flag is the intended construct.
  #pragma omp parallel for shared(found)
  for (unsigned int i = 0; i < p - 1; i++) {
    if (found == 0) {               // cheap (racy but benign) early-out
      if (modExp(g, i + 1, p) == h) {
        printf("Secret key found! x = %u \n", i + 1);
        found = 1;
      }
    }
  }
  double endTime = omp_get_wtime();
  double totalTime = endTime - startTime;
  double work = (double) p;
  double throughput = work / totalTime;
  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);

  // Release heap buffers (the original leaked all three).
  free(message);
  free(Zmessage);
  free(a);
  return 0;
}
|
dropout_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstring>
#include <random>
#include <string>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename DeviceContext, typename T>
class CPUDropoutKernel : public framework::OpKernel<T> {
 public:
  // CPU forward pass for dropout.
  //
  // Training (is_test == false): each element is dropped with probability
  // dropout_prob and the keep/drop decision is recorded in "Mask".  With
  // "upscale_in_train", kept elements are scaled by 1/(1-p) so inference
  // needs no rescaling.
  //
  // Inference (is_test == true): no mask is produced; the input is copied
  // through ("upscale_in_train") or scaled by (1-p) otherwise.
  void Compute(const framework::ExecutionContext& context) const override {
    const auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");
    const T* src = input->data<T>();
    T* dst = output->mutable_data<T>(context.GetPlace());

    const float dropout_prob = context.Attr<float>("dropout_prob");
    const auto& impl = context.Attr<std::string>("dropout_implementation");
    const bool upscale_in_train = (impl == "upscale_in_train");

    if (!context.Attr<bool>("is_test")) {
      auto* mask = context.Output<Tensor>("Mask");
      uint8_t* mask_data = mask->mutable_data<uint8_t>(context.GetPlace());
      const size_t size = framework::product(mask->dims());

      // Special case: p == 1 drops everything; skip the RNG entirely.
      if (dropout_prob == 1.0f) {
        std::memset(dst, 0, size * sizeof(*dst));              // NOLINT
        std::memset(mask_data, 0, size * sizeof(*mask_data));  // NOLINT
        return;
      }

      // NOTE: a fixed seed should only be used in unittests or for debug;
      // training must use a random seed.
      std::random_device rnd;
      std::minstd_rand engine;
      const int seed =
          context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
      engine.seed(seed);

      std::uniform_real_distribution<float> dist(0, 1);
      // Loop-invariant keep-scale (identical per-element value as before).
      const T scale = static_cast<T>(1.0f - dropout_prob);
      for (size_t i = 0; i < size; ++i) {
        // One draw per element, in the same order as before, so the mask
        // sequence for a fixed seed is unchanged.
        if (dist(engine) >= dropout_prob) {
          mask_data[i] = 1;  // kept
          dst[i] = upscale_in_train ? src[i] / scale : src[i];
        } else {
          mask_data[i] = 0;  // dropped
          dst[i] = 0;
        }
      }
    } else if (upscale_in_train) {
      // Inference with upscale_in_train: identity copy.
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
      for (int i = 0; i < input->numel(); i++) {
        dst[i] = src[i];
      }
    } else {
      // Inference with downscale: scale the whole tensor by (1 - p).
      auto X = EigenMatrix<T>::Reshape(*input, 1);
      auto Y = EigenMatrix<T>::Reshape(*output, 1);
      auto& place =
          *context.template device_context<DeviceContext>().eigen_device();
      Y.device(place) = X * static_cast<T>(1.0f - dropout_prob);
    }
  }
};
template <typename DeviceContext, typename T>
class DropoutGradKernel : public framework::OpKernel<T> {
 public:
  // CPU backward pass for dropout: dX = dY * mask, additionally divided by
  // (1-p) when the forward pass used "upscale_in_train".
  void Compute(const framework::ExecutionContext& context) const override {
    PADDLE_ENFORCE(!context.Attr<bool>("is_test"),
                   "GradOp is only callable when is_test is false");

    auto* grad_x = context.Output<Tensor>(framework::GradVarName("X"));
    auto* grad_y = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* mask = context.Input<Tensor>("Mask");
    grad_x->mutable_data<T>(context.GetPlace());

    auto mask_mat = EigenMatrix<uint8_t>::Reshape(*mask, 1);
    auto dx = EigenMatrix<T>::Reshape(*grad_x, 1);
    auto dy = EigenMatrix<T>::Reshape(*grad_y, 1);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();

    const auto& impl = context.Attr<std::string>("dropout_implementation");
    if (impl != "upscale_in_train") {
      // downscale_in_infer: gradient passes through the kept slots only.
      dx.device(place) = dy * mask_mat.cast<T>();
      return;
    }

    const float dropout_prob = context.Attr<float>("dropout_prob");
    if (dropout_prob == 1.0f) {
      // Everything was dropped in the forward pass; gradient is zero.
      dx.device(place) = static_cast<T>(0) * dy;
    } else {
      dx.device(place) =
          dy * mask_mat.cast<T>() / static_cast<T>(1.0f - dropout_prob);
    }
  }
};
} // namespace operators
} // namespace paddle
|
c-parser.c | /* Parser for C and Objective-C.
Copyright (C) 1987-2020 Free Software Foundation, Inc.
Parser actions based on the old Bison parser; structure somewhat
influenced by and fragments based on the C++ parser.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* TODO:
Make sure all relevant comments, and all relevant code from all
actions, brought over from old parser. Verify exact correspondence
of syntax accepted.
Add testcases covering every input symbol in every state in old and
new parsers.
Include full syntax for GNU C, including erroneous cases accepted
with error messages, in syntax productions in comments.
Make more diagnostics in the front end generally take an explicit
location rather than implicitly using input_location. */
#include "config.h"
#define INCLUDE_UNIQUE_PTR
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "c-tree.h"
#include "timevar.h"
#include "stringpool.h"
#include "cgraph.h"
#include "attribs.h"
#include "stor-layout.h"
#include "varasm.h"
#include "trans-mem.h"
#include "c-family/c-pragma.h"
#include "c-lang.h"
#include "c-family/c-objc.h"
#include "plugin.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "builtins.h"
#include "gomp-constants.h"
#include "c-family/c-indentation.h"
#include "gimple-expr.h"
#include "context.h"
#include "gcc-rich-location.h"
#include "c-parser.h"
#include "gimple-parser.h"
#include "read-rtl-function.h"
#include "run-rtl-passes.h"
#include "intl.h"
#include "c-family/name-hint.h"
#include "tree-iterator.h"
#include "tree-pretty-print.h"
#include "memmodel.h"
#include "c-family/known-headers.h"
/* We need to walk over decls with incomplete struct/union/enum types
after parsing the whole translation unit.
In finish_decl(), if the decl is static, has incomplete
struct/union/enum type, it is appended to incomplete_record_decls.
In c_parser_translation_unit(), we iterate over incomplete_record_decls
and report error if any of the decls are still incomplete. */
vec<tree> incomplete_record_decls;
/* Record the source range [START, FINISH] on EXPR, and mirror it onto
   the underlying tree value when one is present.  */
void
set_c_expr_source_range (c_expr *expr,
			 location_t start, location_t finish)
{
  source_range range;
  range.m_start = start;
  range.m_finish = finish;
  expr->src_range = range;
  if (expr->value)
    set_source_range (expr->value, start, finish);
}
/* Record SRC_RANGE on EXPR, and propagate it to the underlying tree
   value when one is present.  */
void
set_c_expr_source_range (c_expr *expr,
			 source_range src_range)
{
  expr->src_range = src_range;
  if (!expr->value)
    return;
  set_source_range (expr->value, src_range);
}
/* Initialization routine for this file. */
void
c_parse_init (void)
{
/* The only initialization required is of the reserved word
identifiers. */
unsigned int i;
tree id;
int mask = 0;
/* Make sure RID_MAX hasn't grown past the 8 bits used to hold the keyword in
the c_token structure. */
gcc_assert (RID_MAX <= 255);
mask |= D_CXXONLY;
if (!flag_isoc99)
mask |= D_C99;
if (flag_no_asm)
{
mask |= D_ASM | D_EXT;
if (!flag_isoc99)
mask |= D_EXT89;
}
if (!c_dialect_objc ())
mask |= D_OBJC | D_CXX_OBJC;
ridpointers = ggc_cleared_vec_alloc<tree> ((int) RID_MAX);
for (i = 0; i < num_c_common_reswords; i++)
{
/* If a keyword is disabled, do not enter it into the table
and so create a canonical spelling that isn't a keyword. */
if (c_common_reswords[i].disable & mask)
{
if (warn_cxx_compat
&& (c_common_reswords[i].disable & D_CXXWARN))
{
id = get_identifier (c_common_reswords[i].word);
C_SET_RID_CODE (id, RID_CXX_COMPAT_WARN);
C_IS_RESERVED_WORD (id) = 1;
}
continue;
}
id = get_identifier (c_common_reswords[i].word);
C_SET_RID_CODE (id, c_common_reswords[i].rid);
C_IS_RESERVED_WORD (id) = 1;
ridpointers [(int) c_common_reswords[i].rid] = id;
}
for (i = 0; i < NUM_INT_N_ENTS; i++)
{
/* We always create the symbols but they aren't always supported. */
char name[50];
sprintf (name, "__int%d", int_n_data[i].bitsize);
id = get_identifier (name);
C_SET_RID_CODE (id, RID_FIRST_INT_N + i);
C_IS_RESERVED_WORD (id) = 1;
sprintf (name, "__int%d__", int_n_data[i].bitsize);
id = get_identifier (name);
C_SET_RID_CODE (id, RID_FIRST_INT_N + i);
C_IS_RESERVED_WORD (id) = 1;
}
}
/* A parser structure recording information about the state and
context of parsing. Includes lexer information with up to two
tokens of look-ahead; more are not needed for C. */
struct GTY(()) c_parser {
/* The look-ahead tokens.  Presumably this normally points at
tokens_buf below and is repointed when parsing from pre-lexed
tokens (see tokens_avail) -- TODO(review): confirm.  */
c_token * GTY((skip)) tokens;
/* Buffer for look-ahead tokens. */
c_token tokens_buf[4];
/* How many look-ahead tokens are available (0 - 4, or
more if parsing from pre-lexed tokens). */
unsigned int tokens_avail;
/* Raw look-ahead tokens, used only for checking in Objective-C
whether '[[' starts attributes. */
vec<c_token, va_gc> *raw_tokens;
/* The number of raw look-ahead tokens that have since been fully
lexed. */
unsigned int raw_tokens_used;
/* True if a syntax error is being recovered from; false otherwise.
c_parser_error sets this flag. It should clear this flag when
enough tokens have been consumed to recover from the error. */
BOOL_BITFIELD error : 1;
/* True if we're processing a pragma, and shouldn't automatically
consume CPP_PRAGMA_EOL. */
BOOL_BITFIELD in_pragma : 1;
/* True if we're parsing the outermost block of an if statement. */
BOOL_BITFIELD in_if_block : 1;
/* True if we want to lex a translated, joined string (for an
initial #pragma pch_preprocess). Otherwise the parser is
responsible for concatenating strings and translating to the
execution character set as needed. */
BOOL_BITFIELD lex_joined_string : 1;
/* True if, when the parser is concatenating string literals, it
should translate them to the execution character set (false
inside attributes). */
BOOL_BITFIELD translate_strings_p : 1;
/* Objective-C specific parser/lexer information. */
/* True if we are in a context where the Objective-C "PQ" keywords
are considered keywords. */
BOOL_BITFIELD objc_pq_context : 1;
/* True if we are parsing a (potential) Objective-C foreach
statement. This is set to true after we parsed 'for (' and while
we wait for 'in' or ';' to decide if it's a standard C for loop or an
Objective-C foreach loop. */
BOOL_BITFIELD objc_could_be_foreach_context : 1;
/* The following flag is needed to contextualize Objective-C lexical
analysis. In some cases (e.g., 'int NSObject;'), it is
undesirable to bind an identifier to an Objective-C class, even
if a class with that name exists. */
BOOL_BITFIELD objc_need_raw_identifier : 1;
/* Nonzero if we're processing a __transaction statement. The value
is 1 | TM_STMT_ATTR_*. */
unsigned int in_transaction : 4;
/* True if we are in a context where the Objective-C "Property attribute"
keywords are valid. */
BOOL_BITFIELD objc_property_attr_context : 1;
/* Whether we have just seen/constructed a string-literal. Set when
returning a string-literal from c_parser_string_literal. Reset
in consume_token. Useful when we get a parse error and see an
unknown token, which could have been a string-literal constant
macro. */
BOOL_BITFIELD seen_string_literal : 1;
/* Location of the last consumed token. */
location_t last_token_location;
};
/* Return a pointer to the Nth token in PARSER's tokens_buf.  */

c_token *
c_parser_tokens_buf (c_parser *parser, unsigned n)
{
  return parser->tokens_buf + n;
}
/* Return the error-recovery state of PARSER.  */

bool
c_parser_error (c_parser *parser)
{
  return parser->error != 0;
}
/* Set the error-recovery state of PARSER to ERR.  */

void
c_parser_set_error (c_parser *parser, bool err)
{
  parser->error = err ? 1 : 0;
}
/* The actual parser and external interface. ??? Does this need to be
garbage-collected? */
static GTY (()) c_parser *the_parser;
/* Read in and lex a single token, storing it in *TOKEN. If RAW,
context-sensitive postprocessing of the token is not done. */
static void
c_lex_one_token (c_parser *parser, c_token *token, bool raw = false)
{
timevar_push (TV_LEX);
/* Lex a fresh token from the preprocessor, unless raw look-ahead
tokens are pending and RAW lexing was not requested.  */
if (raw || vec_safe_length (parser->raw_tokens) == 0)
{
token->type = c_lex_with_flags (&token->value, &token->location,
&token->flags,
(parser->lex_joined_string
? 0 : C_LEX_STRING_NO_JOIN));
token->id_kind = C_ID_NONE;
token->keyword = RID_MAX;
token->pragma_kind = PRAGMA_NONE;
}
else
{
/* Use a token previously lexed as a raw look-ahead token, and
complete the processing on it. */
*token = (*parser->raw_tokens)[parser->raw_tokens_used];
++parser->raw_tokens_used;
/* Release the raw-token buffer once every entry has been consumed.  */
if (parser->raw_tokens_used == vec_safe_length (parser->raw_tokens))
{
vec_free (parser->raw_tokens);
parser->raw_tokens_used = 0;
}
}
/* Raw tokens skip all context-sensitive classification below.  */
if (raw)
goto out;
switch (token->type)
{
case CPP_NAME:
{
tree decl;
bool objc_force_identifier = parser->objc_need_raw_identifier;
if (c_dialect_objc ())
parser->objc_need_raw_identifier = false;
if (C_IS_RESERVED_WORD (token->value))
{
enum rid rid_code = C_RID_CODE (token->value);
if (rid_code == RID_CXX_COMPAT_WARN)
{
/* Disabled keyword kept only for -Wc++-compat: warn, then
treat it as an ordinary identifier below.  */
warning_at (token->location,
OPT_Wc___compat,
"identifier %qE conflicts with C++ keyword",
token->value);
}
else if (rid_code >= RID_FIRST_ADDR_SPACE
&& rid_code <= RID_LAST_ADDR_SPACE)
{
/* A named address-space qualifier.  */
addr_space_t as;
as = (addr_space_t) (rid_code - RID_FIRST_ADDR_SPACE);
targetm.addr_space.diagnose_usage (as, token->location);
token->id_kind = C_ID_ADDRSPACE;
token->keyword = rid_code;
break;
}
else if (c_dialect_objc () && OBJC_IS_PQ_KEYWORD (rid_code))
{
/* We found an Objective-C "pq" keyword (in, out,
inout, bycopy, byref, oneway). They need special
care because the interpretation depends on the
context. */
if (parser->objc_pq_context)
{
token->type = CPP_KEYWORD;
token->keyword = rid_code;
break;
}
else if (parser->objc_could_be_foreach_context
&& rid_code == RID_IN)
{
/* We are in Objective-C, inside a (potential)
foreach context (which means after having
parsed 'for (', but before having parsed ';'),
and we found 'in'. We consider it the keyword
which terminates the declaration at the
beginning of a foreach-statement. Note that
this means you can't use 'in' for anything else
in that context; in particular, in Objective-C
you can't use 'in' as the name of the running
variable in a C for loop. We could potentially
try to add code here to disambiguate, but it
seems a reasonable limitation. */
token->type = CPP_KEYWORD;
token->keyword = rid_code;
break;
}
/* Else, "pq" keywords outside of the "pq" context are
not keywords, and we fall through to the code for
normal tokens. */
}
else if (c_dialect_objc () && OBJC_IS_PATTR_KEYWORD (rid_code))
{
/* We found an Objective-C "property attribute"
keyword (getter, setter, readonly, etc). These are
only valid in the property context. */
if (parser->objc_property_attr_context)
{
token->type = CPP_KEYWORD;
token->keyword = rid_code;
break;
}
/* Else they are not special keywords.
*/
}
else if (c_dialect_objc ()
&& (OBJC_IS_AT_KEYWORD (rid_code)
|| OBJC_IS_CXX_KEYWORD (rid_code)))
{
/* We found one of the Objective-C "@" keywords (defs,
selector, synchronized, etc) or one of the
Objective-C "cxx" keywords (class, private,
protected, public, try, catch, throw) without a
preceding '@' sign. Do nothing and fall through to
the code for normal tokens (in C++ we would still
consider the CXX ones keywords, but not in C). */
;
}
else
{
/* An ordinary, unconditionally enabled keyword.  */
token->type = CPP_KEYWORD;
token->keyword = rid_code;
break;
}
}
/* Not a keyword in this context: classify the identifier by what
it is currently bound to.  */
decl = lookup_name (token->value);
if (decl)
{
if (TREE_CODE (decl) == TYPE_DECL)
{
token->id_kind = C_ID_TYPENAME;
break;
}
}
else if (c_dialect_objc ())
{
tree objc_interface_decl = objc_is_class_name (token->value);
/* Objective-C class names are in the same namespace as
variables and typedefs, and hence are shadowed by local
declarations. */
if (objc_interface_decl
&& (!objc_force_identifier || global_bindings_p ()))
{
token->value = objc_interface_decl;
token->id_kind = C_ID_CLASSNAME;
break;
}
}
token->id_kind = C_ID_ID;
}
break;
case CPP_AT_NAME:
/* This only happens in Objective-C; it must be a keyword. */
token->type = CPP_KEYWORD;
switch (C_RID_CODE (token->value))
{
/* Replace 'class' with '@class', 'private' with '@private',
etc. This prevents confusion with the C++ keyword
'class', and makes the tokens consistent with other
Objective-C 'AT' keywords. For example '@class' is
reported as RID_AT_CLASS which is consistent with
'@synchronized', which is reported as
RID_AT_SYNCHRONIZED.
*/
case RID_CLASS: token->keyword = RID_AT_CLASS; break;
case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break;
case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break;
case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break;
case RID_THROW: token->keyword = RID_AT_THROW; break;
case RID_TRY: token->keyword = RID_AT_TRY; break;
case RID_CATCH: token->keyword = RID_AT_CATCH; break;
case RID_SYNCHRONIZED: token->keyword = RID_AT_SYNCHRONIZED; break;
default: token->keyword = C_RID_CODE (token->value);
}
break;
case CPP_COLON:
case CPP_COMMA:
case CPP_CLOSE_PAREN:
case CPP_SEMICOLON:
/* These tokens may affect the interpretation of any identifiers
following, if doing Objective-C. */
if (c_dialect_objc ())
parser->objc_need_raw_identifier = false;
break;
case CPP_PRAGMA:
/* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */
token->pragma_kind = (enum pragma_kind) TREE_INT_CST_LOW (token->value);
token->value = NULL;
break;
default:
break;
}
out:
timevar_pop (TV_LEX);
}
/* Return a pointer to the next token from PARSER, reading it in if
   necessary.  */

c_token *
c_parser_peek_token (c_parser *parser)
{
  c_token *first = &parser->tokens[0];
  if (parser->tokens_avail == 0)
    {
      /* Nothing buffered yet: lex one token into slot 0.  */
      c_lex_one_token (parser, first);
      parser->tokens_avail = 1;
    }
  return first;
}
/* Return a pointer to the next-but-one token from PARSER, reading it
   in if necessary.  The next token is already read in.  */

c_token *
c_parser_peek_2nd_token (c_parser *parser)
{
  if (parser->tokens_avail < 2)
    {
      /* Exactly one token must already be buffered, and looking past
	 EOF or the end of a pragma line is invalid.  */
      gcc_assert (parser->tokens_avail == 1);
      gcc_assert (parser->tokens[0].type != CPP_EOF);
      gcc_assert (parser->tokens[0].type != CPP_PRAGMA_EOL);
      c_lex_one_token (parser, &parser->tokens[1]);
      parser->tokens_avail = 2;
    }
  return &parser->tokens[1];
}
/* Return a pointer to the Nth token from PARSER, reading it
   in if necessary.  The N-1th token is already read in.  */

c_token *
c_parser_peek_nth_token (c_parser *parser, unsigned int n)
{
  /* N is 1-based, not zero-based.  */
  gcc_assert (n > 0);
  c_token *slot = &parser->tokens[n - 1];
  if (parser->tokens_avail < n)
    {
      /* Only the Nth token may be missing at this point.  */
      gcc_assert (parser->tokens_avail == n - 1);
      c_lex_one_token (parser, slot);
      parser->tokens_avail = n;
    }
  return slot;
}
/* Return a pointer to the Nth token from PARSER, reading it in as a
raw look-ahead token if necessary. The N-1th token is already read
in. Raw look-ahead tokens remain available for when the non-raw
functions above are called. */
c_token *
c_parser_peek_nth_token_raw (c_parser *parser, unsigned int n)
{
/* N is 1-based, not zero-based. */
gcc_assert (n > 0);
/* Tokens already promoted into the main look-ahead buffer satisfy the
request directly.  */
if (parser->tokens_avail >= n)
return &parser->tokens[n - 1];
/* Total visible tokens: fully-lexed ones plus raw ones not yet
consumed by c_lex_one_token.  */
unsigned int raw_len = vec_safe_length (parser->raw_tokens);
unsigned int raw_avail
= parser->tokens_avail + raw_len - parser->raw_tokens_used;
gcc_assert (raw_avail >= n - 1);
if (raw_avail >= n)
return &(*parser->raw_tokens)[parser->raw_tokens_used
+ n - 1 - parser->tokens_avail];
/* Otherwise lex exactly one more raw token onto the end of the raw
queue and return it.  */
vec_safe_reserve (parser->raw_tokens, 1);
parser->raw_tokens->quick_grow (raw_len + 1);
c_lex_one_token (parser, &(*parser->raw_tokens)[raw_len], true);
return &(*parser->raw_tokens)[raw_len];
}
/* Return true if KEYWORD can begin a type name (a type specifier or
qualifier).  Exhaustive table -- keep in sync with the CPP_KEYWORD
case of c_token_starts_declspecs below.  */
bool
c_keyword_starts_typename (enum rid keyword)
{
switch (keyword)
{
case RID_UNSIGNED:
case RID_LONG:
case RID_SHORT:
case RID_SIGNED:
case RID_COMPLEX:
case RID_INT:
case RID_CHAR:
case RID_FLOAT:
case RID_DOUBLE:
case RID_VOID:
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
CASE_RID_FLOATN_NX:
case RID_BOOL:
case RID_ENUM:
case RID_STRUCT:
case RID_UNION:
case RID_TYPEOF:
case RID_CONST:
case RID_ATOMIC:
case RID_VOLATILE:
case RID_RESTRICT:
case RID_ATTRIBUTE:
case RID_FRACT:
case RID_ACCUM:
case RID_SAT:
case RID_AUTO_TYPE:
case RID_ALIGNAS:
return true;
default:
/* The __intN keywords, when enabled for the target.  */
if (keyword >= RID_FIRST_INT_N
&& keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
&& int_n_enabled_p[keyword - RID_FIRST_INT_N])
return true;
return false;
}
}
/* Return true if TOKEN can start a type name,
   false otherwise.  */
bool
c_token_starts_typename (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Identifiers start a type name only when classified as a
	 typename, an address space, or an Objective-C class name.  */
      switch (token->id_kind)
	{
	case C_ID_ID:
	  return false;
	case C_ID_ADDRSPACE:
	case C_ID_TYPENAME:
	  return true;
	case C_ID_CLASSNAME:
	  gcc_assert (c_dialect_objc ());
	  return true;
	default:
	  gcc_unreachable ();
	}
    case CPP_KEYWORD:
      return c_keyword_starts_typename (token->keyword);
    case CPP_LESS:
      /* '<' can open a protocol-qualified type, but only in
	 Objective-C.  */
      return c_dialect_objc ();
    default:
      return false;
    }
}
/* Return true if the next token from PARSER can start a type name,
   false otherwise.  LA specifies how to do lookahead in order to
   detect unknown type names.  If unsure, pick CLA_PREFER_ID.  */
static inline bool
c_parser_next_tokens_start_typename (c_parser *parser, enum c_lookahead_kind la)
{
  c_token *tok = c_parser_peek_token (parser);

  /* The easy case: the token is already known to start a typename.  */
  if (c_token_starts_typename (tok))
    return true;

  /* Otherwise, try a bit harder to detect an unknown (undeclared)
     type name, unless the caller prefers identifiers.  */
  if (la == cla_prefer_id)
    return false;
  if (tok->type != CPP_NAME || tok->id_kind != C_ID_ID)
    return false;
  /* Do not try too hard when we could have "object in array".  */
  if (parser->objc_could_be_foreach_context)
    return false;
  if (la != cla_prefer_type)
    {
      /* Require a following name or '*', as in "foo x;" or "foo *x;".  */
      c_token *next = c_parser_peek_2nd_token (parser);
      if (next->type != CPP_NAME && next->type != CPP_MULT)
	return false;
    }

  /* Only identifiers with no visible binding qualify.  */
  return !lookup_name (tok->value);
}
/* Return true if TOKEN is a type qualifier, false otherwise.  */
static bool
c_token_is_qualifier (c_token *token)
{
  switch (token->type)
    {
    case CPP_NAME:
      /* Among identifiers, only address-space names are qualifiers.  */
      return token->id_kind == C_ID_ADDRSPACE;
    case CPP_KEYWORD:
      switch (token->keyword)
	{
	case RID_CONST:
	case RID_VOLATILE:
	case RID_RESTRICT:
	case RID_ATTRIBUTE:
	case RID_ATOMIC:
	  return true;
	default:
	  return false;
	}
    case CPP_LESS:
      return false;
    default:
      gcc_unreachable ();
    }
}
/* Return true if the next token from PARSER is a type qualifier,
   false otherwise.  */
static inline bool
c_parser_next_token_is_qualifier (c_parser *parser)
{
  return c_token_is_qualifier (c_parser_peek_token (parser));
}
/* Return true if TOKEN can start declaration specifiers (not
including standard attributes), false otherwise. */
static bool
c_token_starts_declspecs (c_token *token)
{
switch (token->type)
{
case CPP_NAME:
switch (token->id_kind)
{
case C_ID_ID:
return false;
case C_ID_ADDRSPACE:
return true;
case C_ID_TYPENAME:
return true;
case C_ID_CLASSNAME:
gcc_assert (c_dialect_objc ());
return true;
default:
gcc_unreachable ();
}
case CPP_KEYWORD:
/* Exhaustive table: the typename-starting keywords of
c_keyword_starts_typename, plus storage classes and function
specifiers.  */
switch (token->keyword)
{
case RID_STATIC:
case RID_EXTERN:
case RID_REGISTER:
case RID_TYPEDEF:
case RID_INLINE:
case RID_NORETURN:
case RID_AUTO:
case RID_THREAD:
case RID_UNSIGNED:
case RID_LONG:
case RID_SHORT:
case RID_SIGNED:
case RID_COMPLEX:
case RID_INT:
case RID_CHAR:
case RID_FLOAT:
case RID_DOUBLE:
case RID_VOID:
case RID_DFLOAT32:
case RID_DFLOAT64:
case RID_DFLOAT128:
CASE_RID_FLOATN_NX:
case RID_BOOL:
case RID_ENUM:
case RID_STRUCT:
case RID_UNION:
case RID_TYPEOF:
case RID_CONST:
case RID_VOLATILE:
case RID_RESTRICT:
case RID_ATTRIBUTE:
case RID_FRACT:
case RID_ACCUM:
case RID_SAT:
case RID_ALIGNAS:
case RID_ATOMIC:
case RID_AUTO_TYPE:
return true;
default:
/* The __intN keywords, when enabled for the target.  */
if (token->keyword >= RID_FIRST_INT_N
&& token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS
&& int_n_enabled_p[token->keyword - RID_FIRST_INT_N])
return true;
return false;
}
case CPP_LESS:
/* '<' can open a protocol-qualified type in Objective-C.  */
if (c_dialect_objc ())
return true;
return false;
default:
return false;
}
}
/* Return true if TOKEN can start declaration specifiers (not
   including standard attributes) or a static assertion, false
   otherwise.  */
static bool
c_token_starts_declaration (c_token *token)
{
  /* A declaration begins either with declaration specifiers or with
     _Static_assert; return the condition directly rather than the
     former if/else that returned true/false.  */
  return (c_token_starts_declspecs (token)
	  || token->keyword == RID_STATIC_ASSERT);
}
/* Return true if the next token from PARSER can start declaration
   specifiers (not including standard attributes), false
   otherwise.  */
bool
c_parser_next_token_starts_declspecs (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);

  /* In Objective-C, a classname normally starts a declspecs unless it
     is immediately followed by a dot.  In that case, it is the
     Objective-C 2.0 "dot-syntax" for class objects, ie, calls the
     setter/getter on the class.  c_token_starts_declspecs() can't
     differentiate between the two cases because it only checks the
     current token, so we have a special check here.  */
  bool objc_class_dot_syntax
    = (c_dialect_objc ()
       && tok->type == CPP_NAME
       && tok->id_kind == C_ID_CLASSNAME
       && c_parser_peek_2nd_token (parser)->type == CPP_DOT);
  if (objc_class_dot_syntax)
    return false;

  return c_token_starts_declspecs (tok);
}
/* Return true if the next tokens from PARSER can start declaration
   specifiers (not including standard attributes) or a static
   assertion, false otherwise.  */
bool
c_parser_next_tokens_start_declaration (c_parser *parser)
{
  c_token *tok = c_parser_peek_token (parser);
  c_token *tok2 = c_parser_peek_2nd_token (parser);

  /* Same Objective-C "dot-syntax" special case as in
     c_parser_next_token_starts_declspecs.  */
  if (c_dialect_objc ()
      && tok->type == CPP_NAME
      && tok->id_kind == C_ID_CLASSNAME
      && tok2->type == CPP_DOT)
    return false;

  /* Labels do not start declarations.  */
  if (tok->type == CPP_NAME && tok2->type == CPP_COLON)
    return false;

  /* Either an ordinary declaration/static assertion, or something
     that begins with a type name.  */
  return (c_token_starts_declaration (tok)
	  || c_parser_next_tokens_start_typename (parser,
						  cla_nonabstract_decl));
}
/* Consume the next token from PARSER.  The caller must not consume
   EOF, and must not consume a CPP_PRAGMA_EOL while inside a pragma or
   a CPP_PRAGMA except during error recovery.  */
void
c_parser_consume_token (c_parser *parser)
{
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type != CPP_EOF);
  gcc_assert (!parser->in_pragma || parser->tokens[0].type != CPP_PRAGMA_EOL);
  gcc_assert (parser->error || parser->tokens[0].type != CPP_PRAGMA);
  /* Remember where the consumed token was, for later diagnostics
     (e.g. fix-it hints after the previous token).  */
  parser->last_token_location = parser->tokens[0].location;
  /* If parser->tokens points into an external token buffer (not the
     parser's own lookahead buffer), just advance the pointer;
     otherwise shift the up-to-three remaining lookahead tokens down
     by one slot.  */
  if (parser->tokens != &parser->tokens_buf[0])
    parser->tokens++;
  else if (parser->tokens_avail >= 2)
    {
      parser->tokens[0] = parser->tokens[1];
      if (parser->tokens_avail >= 3)
	{
	  parser->tokens[1] = parser->tokens[2];
	  if (parser->tokens_avail >= 4)
	    parser->tokens[2] = parser->tokens[3];
	}
    }
  parser->tokens_avail--;
  /* Any consumed token ends a string-literal sequence; this flag is
     used for better diagnostics about adjacent literals.  */
  parser->seen_string_literal = false;
}
/* Expect the current token to be a #pragma.  Consume it and remember
   that we've begun parsing a pragma.  */
static void
c_parser_consume_pragma (c_parser *parser)
{
  gcc_assert (!parser->in_pragma);
  gcc_assert (parser->tokens_avail >= 1);
  gcc_assert (parser->tokens[0].type == CPP_PRAGMA);
  /* Same buffer handling as c_parser_consume_token: advance the
     pointer for an external token buffer, otherwise shift the
     remaining lookahead tokens down by one.  */
  if (parser->tokens != &parser->tokens_buf[0])
    parser->tokens++;
  else if (parser->tokens_avail >= 2)
    {
      parser->tokens[0] = parser->tokens[1];
      if (parser->tokens_avail >= 3)
	parser->tokens[1] = parser->tokens[2];
    }
  parser->tokens_avail--;
  /* From now until c_parser_skip_to_pragma_eol, we are inside the
     pragma's token stream.  */
  parser->in_pragma = true;
}
/* Update the global input_location from TOKEN.  */
static inline void
c_parser_set_source_position_from_token (c_token *token)
{
  /* An EOF token's location is not useful for diagnostics; keep the
     previous input_location in that case.  */
  if (token->type != CPP_EOF)
    input_location = token->location;
}
/* Helper function for c_parser_error.
   Having peeked a token of kind TOK1_KIND that might signify
   a conflict marker, peek successor tokens to determine
   if we actually do have a conflict marker.
   Specifically, we consider a run of 7 '<', '=' or '>' characters
   at the start of a line as a conflict marker.
   These come through the lexer as three pairs and a single,
   e.g. three CPP_LSHIFT ("<<") and a CPP_LESS ('<').
   If it returns true, *OUT_LOC is written to with the location/range
   of the marker.  */
static bool
c_parser_peek_conflict_marker (c_parser *parser, enum cpp_ttype tok1_kind,
			       location_t *out_loc)
{
  /* Tokens 2 and 3 must repeat the doubled character (e.g. "<<").  */
  c_token *token2 = c_parser_peek_2nd_token (parser);
  if (token2->type != tok1_kind)
    return false;
  c_token *token3 = c_parser_peek_nth_token (parser, 3);
  if (token3->type != tok1_kind)
    return false;
  /* Token 4 is the trailing single character (e.g. '<').  */
  c_token *token4 = c_parser_peek_nth_token (parser, 4);
  if (token4->type != conflict_marker_get_final_tok_kind (tok1_kind))
    return false;
  /* It must be at the start of the line.  */
  location_t start_loc = c_parser_peek_token (parser)->location;
  if (LOCATION_COLUMN (start_loc) != 1)
    return false;
  /* We have a conflict marker.  Construct a location of the form:
	<<<<<<<
	^~~~~~~
     with start == caret, finishing at the end of the marker.  */
  location_t finish_loc = get_finish (token4->location);
  *out_loc = make_location (start_loc, start_loc, finish_loc);
  return true;
}
/* Issue a diagnostic of the form
      FILE:LINE: MESSAGE before TOKEN
   where TOKEN is the next token in the input stream of PARSER.
   MESSAGE (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".
   Use RICHLOC as the location of the diagnostic.
   Do not issue a diagnostic if still recovering from an error.
   Return true iff an error was actually emitted.
   ??? This is taken from the C++ parser, but building up messages in
   this way is not i18n-friendly and some other approach should be
   used.  */
static bool
c_parser_error_richloc (c_parser *parser, const char *gmsgid,
			rich_location *richloc)
{
  c_token *token = c_parser_peek_token (parser);
  /* Suppress cascading diagnostics while recovering from a prior
     error; even so, mark the parser as being in an error state.  */
  if (parser->error)
    return false;
  parser->error = true;
  if (!gmsgid)
    return false;

  /* If this is actually a conflict marker, report it as such.
     Conflict markers begin with "<<", ">>" or "==" pairs.  */
  if (token->type == CPP_LSHIFT
      || token->type == CPP_RSHIFT
      || token->type == CPP_EQ_EQ)
    {
      location_t loc;
      if (c_parser_peek_conflict_marker (parser, token->type, &loc))
	{
	  error_at (loc, "version control conflict marker in file");
	  return true;
	}
    }

  /* If we were parsing a string-literal and there is an unknown name
     token right after, then check to see if that could also have been
     a literal string by checking the name against a list of known
     standard string literal constants defined in header files. If
     there is one, then add that as an hint to the error message. */
  auto_diagnostic_group d;
  name_hint h;
  if (parser->seen_string_literal && token->type == CPP_NAME)
    {
      tree name = token->value;
      const char *token_name = IDENTIFIER_POINTER (name);
      const char *header_hint
	= get_c_stdlib_header_for_string_macro_name (token_name);
      if (header_hint != NULL)
	h = name_hint (NULL, new suggest_missing_header (token->location,
							 token_name,
							 header_hint));
    }

  c_parse_error (gmsgid,
		 /* Because c_parse_error does not understand
		    CPP_KEYWORD, keywords are treated like
		    identifiers.  */
		 (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
		 /* ??? The C parser does not save the cpp flags of a
		    token, we need to pass 0 here and we will not get
		    the source spelling of some tokens but rather the
		    canonical spelling.  */
		 token->value, /*flags=*/0, richloc);
  return true;
}
/* As c_parser_error_richloc, but issue the message at the
   location of PARSER's next token, or at input_location
   if the next token is EOF.  */
bool
c_parser_error (c_parser *parser, const char *gmsgid)
{
  /* Point input_location at the next token (unless it is EOF), then
     diagnose at that position.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  rich_location richloc (line_table, input_location);
  return c_parser_error_richloc (parser, gmsgid, &richloc);
}
/* Some tokens naturally come in pairs e.g.'(' and ')'.
   This class is for tracking such a matching pair of symbols.
   In particular, it tracks the location of the first token,
   so that if the second token is missing, we can highlight the
   location of the first token when notifying the user about the
   problem.
   TRAITS_T supplies the open/close token types and their error
   messages (see matching_paren_traits / matching_brace_traits).  */
template <typename traits_t>
class token_pair
{
 public:
  /* token_pair's ctor.  */
  token_pair () : m_open_loc (UNKNOWN_LOCATION) {}

  /* If the next token is the opening symbol for this pair, consume it and
     return true.
     Otherwise, issue an error and return false.
     In either case, record the location of the opening token.  */

  bool require_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    if (token)
      m_open_loc = token->location;

    return c_parser_require (parser, traits_t::open_token_type,
			     traits_t::open_gmsgid);
  }

  /* Consume the next token from PARSER, recording its location as
     that of the opening token within the pair.
     Asserts that the token really is the opening symbol.  */

  void consume_open (c_parser *parser)
  {
    c_token *token = c_parser_peek_token (parser);
    gcc_assert (token->type == traits_t::open_token_type);
    m_open_loc = token->location;
    c_parser_consume_token (parser);
  }

  /* If the next token is the closing symbol for this pair, consume it
     and return true.
     Otherwise, issue an error, highlighting the location of the
     corresponding opening token, and return false.  */

  bool require_close (c_parser *parser) const
  {
    return c_parser_require (parser, traits_t::close_token_type,
			     traits_t::close_gmsgid, m_open_loc);
  }

  /* Like token_pair::require_close, except that tokens will be skipped
     until the desired token is found.  An error message is still produced
     if the next token is not as expected.  */

  void skip_until_found_close (c_parser *parser) const
  {
    c_parser_skip_until_found (parser, traits_t::close_token_type,
			       traits_t::close_gmsgid, m_open_loc);
  }

 private:
  /* Location of the opening token, or UNKNOWN_LOCATION if it has not
     been seen yet.  */
  location_t m_open_loc;
};
/* Traits for token_pair<T> for tracking matching pairs of parentheses.  */

struct matching_paren_traits
{
  static const enum cpp_ttype open_token_type = CPP_OPEN_PAREN;
  static const char * const open_gmsgid;
  static const enum cpp_ttype close_token_type = CPP_CLOSE_PAREN;
  static const char * const close_gmsgid;
};

/* Out-of-class definitions of the diagnostic message strings.  */
const char * const matching_paren_traits::open_gmsgid = "expected %<(%>";
const char * const matching_paren_traits::close_gmsgid = "expected %<)%>";

/* "matching_parens" is a token_pair<T> class for tracking matching
   pairs of parentheses.  */

typedef token_pair<matching_paren_traits> matching_parens;
/* Traits for token_pair<T> for tracking matching pairs of braces.  */

struct matching_brace_traits
{
  static const enum cpp_ttype open_token_type = CPP_OPEN_BRACE;
  static const char * const open_gmsgid;
  static const enum cpp_ttype close_token_type = CPP_CLOSE_BRACE;
  static const char * const close_gmsgid;
};

/* Out-of-class definitions of the diagnostic message strings.  */
const char * const matching_brace_traits::open_gmsgid = "expected %<{%>";
const char * const matching_brace_traits::close_gmsgid = "expected %<}%>";

/* "matching_braces" is a token_pair<T> class for tracking matching
   pairs of braces.  */

typedef token_pair<matching_brace_traits> matching_braces;
/* Get a description of the matching symbol to TYPE e.g. "(" for
   CPP_CLOSE_PAREN.  */
static const char *
get_matching_symbol (enum cpp_ttype type)
{
  switch (type)
    {
    case CPP_CLOSE_PAREN:
      return "(";
    case CPP_CLOSE_BRACE:
      return "{";
    default:
      /* Only the two close tokens above are ever passed in; the
	 return after gcc_unreachable keeps the compiler happy.  */
      gcc_unreachable ();
      return "";
    }
}
/* If the next token is of the indicated TYPE, consume it.  Otherwise,
   issue the error MSGID.  If MSGID is NULL then a message has already
   been produced and no message will be produced this time.  Returns
   true if found, false otherwise.

   If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it
   within any error as the location of an "opening" token matching
   the close token TYPE (e.g. the location of the '(' when TYPE is
   CPP_CLOSE_PAREN).

   If TYPE_IS_UNIQUE is true (the default) then msgid describes exactly
   one type (e.g. "expected %<)%>") and thus it may be reasonable to
   attempt to generate a fix-it hint for the problem.
   Otherwise msgid describes multiple token types (e.g.
   "expected %<;%>, %<,%> or %<)%>"), and thus we shouldn't attempt to
   generate a fix-it hint.  */

bool
c_parser_require (c_parser *parser,
		  enum cpp_ttype type,
		  const char *msgid,
		  location_t matching_location,
		  bool type_is_unique)
{
  if (c_parser_next_token_is (parser, type))
    {
      c_parser_consume_token (parser);
      return true;
    }
  else
    {
      location_t next_token_loc = c_parser_peek_token (parser)->location;
      gcc_rich_location richloc (next_token_loc);

      /* Potentially supply a fix-it hint, suggesting to add the
	 missing token immediately after the *previous* token.
	 This may move the primary location within richloc.  */
      if (!parser->error && type_is_unique)
	maybe_suggest_missing_token_insertion (&richloc, type,
					       parser->last_token_location);

      /* If matching_location != UNKNOWN_LOCATION, highlight it.
	 Attempt to consolidate diagnostics by printing it as a
	 secondary range within the main diagnostic.  */
      bool added_matching_location = false;
      if (matching_location != UNKNOWN_LOCATION)
	added_matching_location
	  = richloc.add_location_if_nearby (matching_location);

      /* Note the intentionally nested 'if': the inform below is only
	 emitted when c_parser_error_richloc actually reported an
	 error (it returns false during error recovery).  */
      if (c_parser_error_richloc (parser, msgid, &richloc))
	/* If we weren't able to consolidate matching_location, then
	   print it as a secondary diagnostic.  */
	if (matching_location != UNKNOWN_LOCATION && !added_matching_location)
	  inform (matching_location, "to match this %qs",
		  get_matching_symbol (type));

      return false;
    }
}
/* If the next token is the indicated keyword, consume it.  Otherwise,
   issue the error MSGID.  Returns true if found, false otherwise.  */
static bool
c_parser_require_keyword (c_parser *parser,
			  enum rid keyword,
			  const char *msgid)
{
  /* Guard clause: wrong keyword means diagnose and bail out.  */
  if (!c_parser_next_token_is_keyword (parser, keyword))
    {
      c_parser_error (parser, msgid);
      return false;
    }
  c_parser_consume_token (parser);
  return true;
}
/* Like c_parser_require, except that tokens will be skipped until the
   desired token is found.  An error message is still produced if the
   next token is not as expected.  If MSGID is NULL then a message has
   already been produced and no message will be produced this
   time.

   If MATCHING_LOCATION is not UNKNOWN_LOCATION, then highlight it
   within any error as the location of an "opening" token matching
   the close token TYPE (e.g. the location of the '(' when TYPE is
   CPP_CLOSE_PAREN).  */

void
c_parser_skip_until_found (c_parser *parser,
			   enum cpp_ttype type,
			   const char *msgid,
			   location_t matching_location)
{
  unsigned nesting_depth = 0;

  /* Fast path: the expected token is right here (this also emits the
     diagnostic if it is not).  */
  if (c_parser_require (parser, type, msgid, matching_location))
    return;

  /* Skip tokens until the desired token is found.  */
  while (true)
    {
      /* Peek at the next token.  */
      c_token *token = c_parser_peek_token (parser);
      /* If we've reached the token we want, consume it and stop.  */
      if (token->type == type && !nesting_depth)
	{
	  c_parser_consume_token (parser);
	  break;
	}

      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      /* Never skip past the end of the pragma we are inside.  */
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* A close token at depth 0 (the post-decrement wraps the
	     unsigned counter, but we break immediately so the value
	     is never used) ends the skip without consuming it.  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  /* Recovery complete: allow further diagnostics.  */
  parser->error = false;
}
/* Skip tokens until the end of a parameter is found, but do not
   consume the comma, semicolon or closing delimiter.  */

static void
c_parser_skip_to_end_of_parameter (c_parser *parser)
{
  unsigned nesting_depth = 0;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      /* An unnested ',' or ';' terminates the parameter; leave it for
	 the caller to consume.  */
      if ((token->type == CPP_COMMA || token->type == CPP_SEMICOLON)
	  && !nesting_depth)
	break;
      /* If we've run out of tokens, stop.  */
      if (token->type == CPP_EOF)
	return;
      /* Never skip past the end of the pragma we are inside.  */
      if (token->type == CPP_PRAGMA_EOL && parser->in_pragma)
	return;
      if (token->type == CPP_OPEN_BRACE
	  || token->type == CPP_OPEN_PAREN
	  || token->type == CPP_OPEN_SQUARE)
	++nesting_depth;
      else if (token->type == CPP_CLOSE_BRACE
	       || token->type == CPP_CLOSE_PAREN
	       || token->type == CPP_CLOSE_SQUARE)
	{
	  /* An unmatched close token at depth 0 also ends the
	     parameter (post-decrement wraps, but we break at once).  */
	  if (nesting_depth-- == 0)
	    break;
	}
      /* Consume this token.  */
      c_parser_consume_token (parser);
    }
  /* Recovery complete: allow further diagnostics.  */
  parser->error = false;
}
/* Expect to be at the end of the pragma directive and consume an
   end of line marker.  If ERROR_IF_NOT_EOL, diagnose any tokens found
   before the CPP_PRAGMA_EOL; in either case all tokens up to and
   including the end-of-line marker (or EOF) are consumed.  */

static void
c_parser_skip_to_pragma_eol (c_parser *parser, bool error_if_not_eol = true)
{
  gcc_assert (parser->in_pragma);
  parser->in_pragma = false;

  if (error_if_not_eol && c_parser_peek_token (parser)->type != CPP_PRAGMA_EOL)
    c_parser_error (parser, "expected end of line");

  /* Drain tokens up to and including the pragma's end-of-line marker
     (stopping early only at EOF).  */
  cpp_ttype token_type;
  do
    {
      c_token *token = c_parser_peek_token (parser);
      token_type = token->type;
      if (token_type == CPP_EOF)
	break;
      c_parser_consume_token (parser);
    }
  while (token_type != CPP_PRAGMA_EOL);

  /* Recovery complete: allow further diagnostics.  */
  parser->error = false;
}
/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested ';'.  Used for error recovery; on exit
   parser->error is cleared so parsing can resume.  */

static void
c_parser_skip_to_end_of_block_or_statement (c_parser *parser)
{
  unsigned nesting_depth = 0;
  bool save_error = parser->error;

  while (true)
    {
      c_token *token;

      /* Peek at the next token.  */
      token = c_parser_peek_token (parser);

      switch (token->type)
	{
	case CPP_EOF:
	  return;

	case CPP_PRAGMA_EOL:
	  /* Never skip past the end of the pragma we are inside.  */
	  if (parser->in_pragma)
	    return;
	  break;

	case CPP_SEMICOLON:
	  /* If the next token is a ';', we have reached the
	     end of the statement.  */
	  if (!nesting_depth)
	    {
	      /* Consume the ';'.  */
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_CLOSE_BRACE:
	  /* If the next token is a non-nested '}', then we have
	     reached the end of the current block.  */
	  if (nesting_depth == 0 || --nesting_depth == 0)
	    {
	      c_parser_consume_token (parser);
	      goto finished;
	    }
	  break;

	case CPP_OPEN_BRACE:
	  /* If it the next token is a '{', then we are entering a new
	     block.  Consume the entire block.  */
	  ++nesting_depth;
	  break;

	case CPP_PRAGMA:
	  /* If we see a pragma, consume the whole thing at once.  We
	     have some safeguards against consuming pragmas willy-nilly.
	     Normally, we'd expect to be here with parser->error set,
	     which disables these safeguards.  But it's possible to get
	     here for secondary error recovery, after parser->error has
	     been cleared.  */
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  parser->error = save_error;
	  continue;

	default:
	  break;
	}

      c_parser_consume_token (parser);
    }

 finished:
  /* Recovery complete: allow further diagnostics.  */
  parser->error = false;
}
/* CPP's options (initialized by c-opts.c). */
extern cpp_options *cpp_opts;
/* Save the warning flags which are controlled by __extension__.
   The flags are packed into the bits of the returned int so that
   restore_extension_diagnostics can unpack them again; all of them
   are then cleared so that __extension__ suppresses the
   diagnostics.  */

static inline int
disable_extension_diagnostics (void)
{
  int ret = (pedantic
	     | (warn_pointer_arith << 1)
	     | (warn_traditional << 2)
	     | (flag_iso << 3)
	     | (warn_long_long << 4)
	     | (warn_cxx_compat << 5)
	     | (warn_overlength_strings << 6)
	     /* warn_c90_c99_compat has three states: -1/0/1, so we must
		play tricks to properly restore it.  */
	     | ((warn_c90_c99_compat == 1) << 7)
	     | ((warn_c90_c99_compat == -1) << 8)
	     /* Similarly for warn_c99_c11_compat.  */
	     | ((warn_c99_c11_compat == 1) << 9)
	     | ((warn_c99_c11_compat == -1) << 10)
	     /* Similarly for warn_c11_c2x_compat.  */
	     | ((warn_c11_c2x_compat == 1) << 11)
	     | ((warn_c11_c2x_compat == -1) << 12)
	     );
  /* Some of these flags are mirrored in the preprocessor's options,
     which must be cleared in sync.  */
  cpp_opts->cpp_pedantic = pedantic = 0;
  warn_pointer_arith = 0;
  cpp_opts->cpp_warn_traditional = warn_traditional = 0;
  flag_iso = 0;
  cpp_opts->cpp_warn_long_long = warn_long_long = 0;
  warn_cxx_compat = 0;
  warn_overlength_strings = 0;
  warn_c90_c99_compat = 0;
  warn_c99_c11_compat = 0;
  warn_c11_c2x_compat = 0;
  return ret;
}
/* Restore the warning flags which are controlled by __extension__.
   FLAGS is the return value from disable_extension_diagnostics;
   the bit layout here must match the packing done there.  */

static inline void
restore_extension_diagnostics (int flags)
{
  cpp_opts->cpp_pedantic = pedantic = flags & 1;
  warn_pointer_arith = (flags >> 1) & 1;
  cpp_opts->cpp_warn_traditional = warn_traditional = (flags >> 2) & 1;
  flag_iso = (flags >> 3) & 1;
  cpp_opts->cpp_warn_long_long = warn_long_long = (flags >> 4) & 1;
  warn_cxx_compat = (flags >> 5) & 1;
  warn_overlength_strings = (flags >> 6) & 1;
  /* See above for why is this needed (the three-state -1/0/1 flags
     are encoded in two bits each).  */
  warn_c90_c99_compat = (flags >> 7) & 1 ? 1 : ((flags >> 8) & 1 ? -1 : 0);
  warn_c99_c11_compat = (flags >> 9) & 1 ? 1 : ((flags >> 10) & 1 ? -1 : 0);
  warn_c11_c2x_compat = (flags >> 11) & 1 ? 1 : ((flags >> 12) & 1 ? -1 : 0);
}
/* Helper data structure for parsing #pragma acc routine.  */
struct oacc_routine_data {
  bool error_seen; /* Set if error has been reported.  */
  bool fndecl_seen; /* Set if one fn decl/definition has been seen already.  */
  tree clauses;    /* The parsed routine clauses.  */
  location_t loc;  /* Location of the pragma, for diagnostics.  */
};
/* Used for parsing objc foreach statements. */
static tree objc_foreach_break_label, objc_foreach_continue_label;
static bool c_parser_nth_token_starts_std_attributes (c_parser *,
unsigned int);
static tree c_parser_std_attribute_specifier_sequence (c_parser *);
static void c_parser_external_declaration (c_parser *);
static void c_parser_asm_definition (c_parser *);
static void c_parser_declaration_or_fndef (c_parser *, bool, bool, bool,
bool, bool, tree *, vec<c_token>,
bool have_attrs = false,
tree attrs = NULL,
struct oacc_routine_data * = NULL,
bool * = NULL);
static void c_parser_static_assert_declaration_no_semi (c_parser *);
static void c_parser_static_assert_declaration (c_parser *);
static struct c_typespec c_parser_enum_specifier (c_parser *);
static struct c_typespec c_parser_struct_or_union_specifier (c_parser *);
static tree c_parser_struct_declaration (c_parser *);
static struct c_typespec c_parser_typeof_specifier (c_parser *);
static tree c_parser_alignas_specifier (c_parser *);
static struct c_declarator *c_parser_direct_declarator (c_parser *, bool,
c_dtr_syn, bool *);
static struct c_declarator *c_parser_direct_declarator_inner (c_parser *,
bool,
struct c_declarator *);
static struct c_arg_info *c_parser_parms_declarator (c_parser *, bool, tree,
bool);
static struct c_arg_info *c_parser_parms_list_declarator (c_parser *, tree,
tree, bool);
static struct c_parm *c_parser_parameter_declaration (c_parser *, tree, bool);
static tree c_parser_simple_asm_expr (c_parser *);
static tree c_parser_gnu_attributes (c_parser *);
static struct c_expr c_parser_initializer (c_parser *);
static struct c_expr c_parser_braced_init (c_parser *, tree, bool,
struct obstack *);
static void c_parser_initelt (c_parser *, struct obstack *);
static void c_parser_initval (c_parser *, struct c_expr *,
struct obstack *);
static tree c_parser_compound_statement (c_parser *, location_t * = NULL);
static location_t c_parser_compound_statement_nostart (c_parser *);
static void c_parser_label (c_parser *);
static void c_parser_statement (c_parser *, bool *, location_t * = NULL);
static void c_parser_statement_after_labels (c_parser *, bool *,
vec<tree> * = NULL);
static tree c_parser_c99_block_statement (c_parser *, bool *,
location_t * = NULL);
static void c_parser_if_statement (c_parser *, bool *, vec<tree> *);
static void c_parser_switch_statement (c_parser *, bool *);
static void c_parser_while_statement (c_parser *, bool, unsigned short, bool *);
static void c_parser_do_statement (c_parser *, bool, unsigned short);
static void c_parser_for_statement (c_parser *, bool, unsigned short, bool *);
static tree c_parser_asm_statement (c_parser *);
static tree c_parser_asm_operands (c_parser *);
static tree c_parser_asm_goto_operands (c_parser *);
static tree c_parser_asm_clobbers (c_parser *);
static struct c_expr c_parser_expr_no_commas (c_parser *, struct c_expr *,
tree = NULL_TREE);
static struct c_expr c_parser_conditional_expression (c_parser *,
struct c_expr *, tree);
static struct c_expr c_parser_binary_expression (c_parser *, struct c_expr *,
tree);
static struct c_expr c_parser_cast_expression (c_parser *, struct c_expr *);
static struct c_expr c_parser_unary_expression (c_parser *);
static struct c_expr c_parser_sizeof_expression (c_parser *);
static struct c_expr c_parser_alignof_expression (c_parser *);
static struct c_expr c_parser_postfix_expression (c_parser *);
static struct c_expr c_parser_postfix_expression_after_paren_type (c_parser *,
struct c_type_name *,
location_t);
static struct c_expr c_parser_postfix_expression_after_primary (c_parser *,
location_t loc,
struct c_expr);
static tree c_parser_transaction (c_parser *, enum rid);
static struct c_expr c_parser_transaction_expression (c_parser *, enum rid);
static tree c_parser_transaction_cancel (c_parser *);
static struct c_expr c_parser_expression (c_parser *);
static struct c_expr c_parser_expression_conv (c_parser *);
static vec<tree, va_gc> *c_parser_expr_list (c_parser *, bool, bool,
vec<tree, va_gc> **, location_t *,
tree *, vec<location_t> *,
unsigned int * = NULL);
static struct c_expr c_parser_has_attribute_expression (c_parser *);
static void c_parser_oacc_declare (c_parser *);
static void c_parser_oacc_enter_exit_data (c_parser *, bool);
static void c_parser_oacc_update (c_parser *);
static void c_parser_omp_construct (c_parser *, bool *);
static void c_parser_omp_threadprivate (c_parser *);
static void c_parser_omp_barrier (c_parser *);
static void c_parser_omp_depobj (c_parser *);
static void c_parser_omp_flush (c_parser *);
static tree c_parser_omp_for_loop (location_t, c_parser *, enum tree_code,
tree, tree *, bool *);
static void c_parser_omp_taskwait (c_parser *);
static void c_parser_omp_taskyield (c_parser *);
static void c_parser_omp_cancel (c_parser *);
enum pragma_context { pragma_external, pragma_struct, pragma_param,
pragma_stmt, pragma_compound };
static bool c_parser_pragma (c_parser *, enum pragma_context, bool *);
static void c_parser_omp_cancellation_point (c_parser *, enum pragma_context);
static bool c_parser_omp_target (c_parser *, enum pragma_context, bool *);
static void c_parser_omp_end_declare_target (c_parser *);
static void c_parser_omp_declare (c_parser *, enum pragma_context);
static void c_parser_omp_requires (c_parser *);
static bool c_parser_omp_ordered (c_parser *, enum pragma_context, bool *);
static void c_parser_oacc_routine (c_parser *, enum pragma_context);
/* These Objective-C parser functions are only ever called when
compiling Objective-C. */
static void c_parser_objc_class_definition (c_parser *, tree);
static void c_parser_objc_class_instance_variables (c_parser *);
static void c_parser_objc_class_declaration (c_parser *);
static void c_parser_objc_alias_declaration (c_parser *);
static void c_parser_objc_protocol_definition (c_parser *, tree);
static bool c_parser_objc_method_type (c_parser *);
static void c_parser_objc_method_definition (c_parser *);
static void c_parser_objc_methodprotolist (c_parser *);
static void c_parser_objc_methodproto (c_parser *);
static tree c_parser_objc_method_decl (c_parser *, bool, tree *, tree *);
static tree c_parser_objc_type_name (c_parser *);
static tree c_parser_objc_protocol_refs (c_parser *);
static void c_parser_objc_try_catch_finally_statement (c_parser *);
static void c_parser_objc_synchronized_statement (c_parser *);
static tree c_parser_objc_selector (c_parser *);
static tree c_parser_objc_selector_arg (c_parser *);
static tree c_parser_objc_receiver (c_parser *);
static tree c_parser_objc_message_args (c_parser *);
static tree c_parser_objc_keywordexpr (c_parser *);
static void c_parser_objc_at_property_declaration (c_parser *);
static void c_parser_objc_at_synthesize_declaration (c_parser *);
static void c_parser_objc_at_dynamic_declaration (c_parser *);
static bool c_parser_objc_diagnose_bad_element_prefix
(c_parser *, struct c_declspecs *);
static location_t c_parser_parse_rtl_body (c_parser *, char *);
/* Parse a translation unit (C90 6.7, C99 6.9, C11 6.9).

   translation-unit:
     external-declarations

   external-declarations:
     external-declaration
     external-declarations external-declaration

   GNU extensions:

   translation-unit:
     empty
*/

static void
c_parser_translation_unit (c_parser *parser)
{
  if (c_parser_next_token_is (parser, CPP_EOF))
    {
      /* An empty file is a GNU extension; pedwarn about it.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C forbids an empty translation unit");
    }
  else
    {
      void *obstack_position = obstack_alloc (&parser_obstack, 0);
      mark_valid_location_for_stdc_pragma (false);
      do
	{
	  ggc_collect ();
	  c_parser_external_declaration (parser);
	  /* Release per-declaration obstack allocations.  */
	  obstack_free (&parser_obstack, obstack_position);
	}
      while (c_parser_next_token_is_not (parser, CPP_EOF));
    }

  /* Diagnose tentatively-defined objects whose type never became
     complete.  */
  unsigned int i;
  tree decl;
  FOR_EACH_VEC_ELT (incomplete_record_decls, i, decl)
    if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node)
      error ("storage size of %q+D isn%'t known", decl);

  /* Diagnose an unterminated "#pragma omp declare target" region.  */
  if (current_omp_declare_target_attribute)
    {
      if (!errorcount)
        error ("%<#pragma omp declare target%> without corresponding "
	       "%<#pragma omp end declare target%>");
      current_omp_declare_target_attribute = 0;
    }
}
/* Parse an external declaration (C90 6.7, C99 6.9, C11 6.9).

   external-declaration:
     function-definition
     declaration

   GNU extensions:

   external-declaration:
     asm-definition
     ;
     __extension__ external-declaration

   Objective-C:

   external-declaration:
     objc-class-definition
     objc-class-declaration
     objc-alias-declaration
     objc-protocol-definition
     objc-method-definition
     @end
*/

static void
c_parser_external_declaration (c_parser *parser)
{
  int ext;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_EXTENSION:
	  /* __extension__ applies to the following declaration;
	     temporarily disable the diagnostics it suppresses.  */
	  ext = disable_extension_diagnostics ();
	  c_parser_consume_token (parser);
	  c_parser_external_declaration (parser);
	  restore_extension_diagnostics (ext);
	  break;
	case RID_ASM:
	  c_parser_asm_definition (parser);
	  break;
	case RID_AT_INTERFACE:
	case RID_AT_IMPLEMENTATION:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_definition (parser, NULL_TREE);
	  break;
	case RID_AT_CLASS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_class_declaration (parser);
	  break;
	case RID_AT_ALIAS:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_alias_declaration (parser);
	  break;
	case RID_AT_PROTOCOL:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_protocol_definition (parser, NULL_TREE);
	  break;
	case RID_AT_PROPERTY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_property_declaration (parser);
	  break;
	case RID_AT_SYNTHESIZE:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_synthesize_declaration (parser);
	  break;
	case RID_AT_DYNAMIC:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_at_dynamic_declaration (parser);
	  break;
	case RID_AT_END:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  objc_finish_implementation ();
	  break;
	default:
	  goto decl_or_fndef;
	}
      break;
    case CPP_SEMICOLON:
      /* A stray ';' at file scope is a GNU extension.  */
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "ISO C does not allow extra %<;%> outside of a function");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      /* STDC pragmas are only valid between external declarations.  */
      mark_valid_location_for_stdc_pragma (true);
      c_parser_pragma (parser, pragma_external, NULL);
      mark_valid_location_for_stdc_pragma (false);
      break;
    case CPP_PLUS:
    case CPP_MINUS:
      if (c_dialect_objc ())
	{
	  /* '+'/'-' begin class/instance method definitions.  */
	  c_parser_objc_method_definition (parser);
	  break;
	}
      /* Else fall through, and yield a syntax error trying to parse
	 as a declaration or function definition.  */
      /* FALLTHRU */
    default:
    decl_or_fndef:
      /* A declaration or a function definition (or, in Objective-C,
	 an @interface or @protocol with prefix attributes).  We can
	 only tell which after parsing the declaration specifiers, if
	 any, and the first declarator.  */
      c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				     NULL, vNULL);
      break;
    }
}
static void c_finish_omp_declare_simd (c_parser *, tree, tree, vec<c_token>);
static void c_finish_oacc_routine (struct oacc_routine_data *, tree, bool);
/* Build and add a DEBUG_BEGIN_STMT statement with location LOC.  */

static void
add_debug_begin_stmt (location_t loc)
{
  /* Don't add DEBUG_BEGIN_STMTs outside of functions, see PR84721.  */
  if (MAY_HAVE_DEBUG_MARKER_STMTS && building_stmt_list_p ())
    {
      tree stmt = build0 (DEBUG_BEGIN_STMT, void_type_node);
      SET_EXPR_LOCATION (stmt, loc);
      add_stmt (stmt);
    }
}
/* Parse a declaration or function definition (C90 6.5, 6.7.1, C99
   6.7, 6.9.1, C11 6.7, 6.9.1).  If FNDEF_OK is true, a function definition
   is accepted; otherwise (old-style parameter declarations) only other
   declarations are accepted.  If STATIC_ASSERT_OK is true, a static
   assertion is accepted; otherwise (old-style parameter declarations)
   it is not.  If NESTED is true, we are inside a function or parsing
   old-style parameter declarations; any functions encountered are
   nested functions and declaration specifiers are required; otherwise
   we are at top level and functions are normal functions and
   declaration specifiers may be optional.  If EMPTY_OK is true, empty
   declarations are OK (subject to all other constraints); otherwise
   (old-style parameter declarations) they are diagnosed.  If
   START_ATTR_OK is true, the declaration specifiers may start with
   attributes (GNU or standard); otherwise they may not.
   OBJC_FOREACH_OBJECT_DECLARATION can be used to get back the parsed
   declaration when parsing an Objective-C foreach statement.
   FALLTHRU_ATTR_P is used to signal whether this function parsed
   "__attribute__((fallthrough));".  ATTRS are any standard attributes
   parsed in the caller (in contexts where such attributes had to be
   parsed to determine whether what follows is a declaration or a
   statement); HAVE_ATTRS says whether there were any such attributes
   (even empty).

   declaration:
     declaration-specifiers init-declarator-list[opt] ;
     static_assert-declaration

   function-definition:
     declaration-specifiers[opt] declarator declaration-list[opt]
       compound-statement

   declaration-list:
     declaration
     declaration-list declaration

   init-declarator-list:
     init-declarator
     init-declarator-list , init-declarator

   init-declarator:
     declarator simple-asm-expr[opt] gnu-attributes[opt]
     declarator simple-asm-expr[opt] gnu-attributes[opt] = initializer

   GNU extensions:

   nested-function-definition:
     declaration-specifiers declarator declaration-list[opt]
       compound-statement

   attribute ;

   Objective-C:
     gnu-attributes objc-class-definition
     gnu-attributes objc-category-definition
     gnu-attributes objc-protocol-definition

   The simple-asm-expr and gnu-attributes are GNU extensions.

   This function does not handle __extension__; that is handled in its
   callers.  ??? Following the old parser, __extension__ may start
   external declarations, declarations in functions and declarations
   at the start of "for" loops, but not old-style parameter
   declarations.

   C99 requires declaration specifiers in a function definition; the
   absence is diagnosed through the diagnosis of implicit int.  In GNU
   C we also allow but diagnose declarations without declaration
   specifiers, but only at top level (elsewhere they conflict with
   other syntax).

   In Objective-C, declarations of the looping variable in a foreach
   statement are exceptionally terminated by 'in' (for example, 'for
   (NSObject *object in array) { ... }').

   OpenMP:

   declaration:
     threadprivate-directive

   GIMPLE:

   gimple-function-definition:
     declaration-specifiers[opt] __GIMPLE (gimple-or-rtl-pass-list) declarator
       declaration-list[opt] compound-statement

   rtl-function-definition:
     declaration-specifiers[opt] __RTL (gimple-or-rtl-pass-list) declarator
       declaration-list[opt] compound-statement  */

static void
c_parser_declaration_or_fndef (c_parser *parser, bool fndef_ok,
                               bool static_assert_ok, bool empty_ok,
                               bool nested, bool start_attr_ok,
                               tree *objc_foreach_object_declaration,
                               vec<c_token> omp_declare_simd_clauses,
                               bool have_attrs, tree attrs,
                               struct oacc_routine_data *oacc_routine_data,
                               bool *fallthru_attr_p)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  bool diagnosed_no_specs = false;
  location_t here = c_parser_peek_token (parser)->location;

  /* Emit a DEBUG_BEGIN_STMT marker at the declaration's start.  */
  add_debug_begin_stmt (c_parser_peek_token (parser)->location);

  if (static_assert_ok
      && c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration (parser);
      return;
    }
  specs = build_null_declspecs ();

  /* Handle any standard attributes parsed in the caller.  */
  if (have_attrs)
    {
      declspecs_add_attrs (here, specs, attrs);
      specs->non_std_attrs_seen_p = false;
    }

  /* Try to detect an unknown type name when we have "A B" or "A *B".  */
  if (c_parser_peek_token (parser)->type == CPP_NAME
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      && (c_parser_peek_2nd_token (parser)->type == CPP_NAME
          || c_parser_peek_2nd_token (parser)->type == CPP_MULT)
      && (!nested || !lookup_name (c_parser_peek_token (parser)->value)))
    {
      tree name = c_parser_peek_token (parser)->value;

      /* Issue a warning about NAME being an unknown type name, perhaps
         with some kind of hint.
         If the user forgot a "struct" etc, suggest inserting
         it.  Otherwise, attempt to look for misspellings.  */
      gcc_rich_location richloc (here);
      if (tag_exists_p (RECORD_TYPE, name))
        {
          /* This is not C++ with its implicit typedef.  */
          richloc.add_fixit_insert_before ("struct ");
          error_at (&richloc,
                    "unknown type name %qE;"
                    " use %<struct%> keyword to refer to the type",
                    name);
        }
      else if (tag_exists_p (UNION_TYPE, name))
        {
          richloc.add_fixit_insert_before ("union ");
          error_at (&richloc,
                    "unknown type name %qE;"
                    " use %<union%> keyword to refer to the type",
                    name);
        }
      else if (tag_exists_p (ENUMERAL_TYPE, name))
        {
          richloc.add_fixit_insert_before ("enum ");
          error_at (&richloc,
                    "unknown type name %qE;"
                    " use %<enum%> keyword to refer to the type",
                    name);
        }
      else
        {
          /* No matching tag: try a spelling-based suggestion.  */
          auto_diagnostic_group d;
          name_hint hint = lookup_name_fuzzy (name, FUZZY_LOOKUP_TYPENAME,
                                              here);
          if (const char *suggestion = hint.suggestion ())
            {
              richloc.add_fixit_replace (suggestion);
              error_at (&richloc,
                        "unknown type name %qE; did you mean %qs?",
                        name, suggestion);
            }
          else
            error_at (here, "unknown type name %qE", name);
        }

      /* Parse declspecs normally to get a correct pointer type, but avoid
         a further "fails to be a type name" error.  Refuse nested functions
         since it is not how the user likely wants us to recover.  */
      c_parser_peek_token (parser)->type = CPP_KEYWORD;
      c_parser_peek_token (parser)->keyword = RID_VOID;
      c_parser_peek_token (parser)->value = error_mark_node;
      fndef_ok = !nested;
    }

  /* When there are standard attributes at the start of the
     declaration (to apply to the entity being declared), an
     init-declarator-list or function definition must be present.  */
  if (c_parser_nth_token_starts_std_attributes (parser, 1))
    have_attrs = true;

  c_parser_declspecs (parser, specs, true, true, start_attr_ok,
                      true, true, start_attr_ok, true, cla_nonabstract_decl);
  if (parser->error)
    {
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }
  if (nested && !specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_end_of_block_or_statement (parser);
      return;
    }

  finish_declspecs (specs);
  bool auto_type_p = specs->typespec_word == cts_auto_type;

  /* A semicolon directly after the specifiers: an empty declaration,
     a tag declaration, or a lone "fallthrough" attribute.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      if (auto_type_p)
        error_at (here, "%<__auto_type%> in empty declaration");
      else if (specs->typespec_kind == ctsk_none
               && attribute_fallthrough_p (specs->attrs))
        {
          if (fallthru_attr_p != NULL)
            *fallthru_attr_p = true;
          if (nested)
            {
              tree fn = build_call_expr_internal_loc (here, IFN_FALLTHROUGH,
                                                      void_type_node, 0);
              add_stmt (fn);
            }
          else
            pedwarn (here, OPT_Wattributes,
                     "%<fallthrough%> attribute at top level");
        }
      else if (empty_ok && !(have_attrs
                             && specs->non_std_attrs_seen_p))
        shadow_tag (specs);
      else
        {
          shadow_tag_warned (specs, 1);
          pedwarn (here, 0, "empty declaration");
        }
      c_parser_consume_token (parser);
      if (oacc_routine_data)
        c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false);
      return;
    }

  /* Provide better error recovery.  Note that a type name here is usually
     better diagnosed as a redeclaration.  */
  if (empty_ok
      && specs->typespec_kind == ctsk_tagdef
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      shadow_tag_warned (specs, 1);
      return;
    }
  else if (c_dialect_objc () && !auto_type_p)
    {
      /* Prefix attributes are an error on method decls.  */
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_PLUS:
        case CPP_MINUS:
          if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
            return;
          if (specs->attrs)
            {
              warning_at (c_parser_peek_token (parser)->location,
                          OPT_Wattributes,
                          "prefix attributes are ignored for methods");
              specs->attrs = NULL_TREE;
            }
          if (fndef_ok)
            c_parser_objc_method_definition (parser);
          else
            c_parser_objc_methodproto (parser);
          return;
          break;
        default:
          break;
        }
      /* This is where we parse 'attributes @interface ...',
         'attributes @implementation ...', 'attributes @protocol ...'
         (where attributes could be, for example, __attribute__
         ((deprecated)).
      */
      switch (c_parser_peek_token (parser)->keyword)
        {
        case RID_AT_INTERFACE:
          {
            if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
              return;
            c_parser_objc_class_definition (parser, specs->attrs);
            return;
          }
          break;
        case RID_AT_IMPLEMENTATION:
          {
            if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
              return;
            if (specs->attrs)
              {
                warning_at (c_parser_peek_token (parser)->location,
                            OPT_Wattributes,
                            "prefix attributes are ignored for implementations");
                specs->attrs = NULL_TREE;
              }
            c_parser_objc_class_definition (parser, NULL_TREE);
            return;
          }
          break;
        case RID_AT_PROTOCOL:
          {
            if (c_parser_objc_diagnose_bad_element_prefix (parser, specs))
              return;
            c_parser_objc_protocol_definition (parser, specs->attrs);
            return;
          }
          break;
        case RID_AT_ALIAS:
        case RID_AT_CLASS:
        case RID_AT_END:
        case RID_AT_PROPERTY:
          if (specs->attrs)
            {
              c_parser_error (parser, "unexpected attribute");
              specs->attrs = NULL;
            }
          break;
        default:
          break;
        }
    }
  else if (attribute_fallthrough_p (specs->attrs))
    warning_at (here, OPT_Wattributes,
                "%<fallthrough%> attribute not followed by %<;%>");

  pending_xref_error ();
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;

  /* Parse the init-declarator-list: each iteration handles one
     declarator plus its optional asm name, attributes and initializer;
     a function definition terminates the loop via "break".  */
  while (true)
    {
      struct c_declarator *declarator;
      bool dummy = false;
      timevar_id_t tv;
      tree fnbody = NULL_TREE;
      /* Declaring either one or more declarators (in which case we
         should diagnose if there were no declaration specifiers) or a
         function definition (in which case the diagnostic for
         implicit int suffices).  */
      declarator = c_parser_declarator (parser,
                                        specs->typespec_kind != ctsk_none,
                                        C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
        {
          if (omp_declare_simd_clauses.exists ())
            c_finish_omp_declare_simd (parser, NULL_TREE, NULL_TREE,
                                       omp_declare_simd_clauses);
          if (oacc_routine_data)
            c_finish_oacc_routine (oacc_routine_data, NULL_TREE, false);
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      if (auto_type_p && declarator->kind != cdk_id)
        {
          error_at (here,
                    "%<__auto_type%> requires a plain identifier"
                    " as declarator");
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      if (c_parser_next_token_is (parser, CPP_EQ)
          || c_parser_next_token_is (parser, CPP_COMMA)
          || c_parser_next_token_is (parser, CPP_SEMICOLON)
          || c_parser_next_token_is_keyword (parser, RID_ASM)
          || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE)
          || c_parser_next_token_is_keyword (parser, RID_IN))
        {
          tree asm_name = NULL_TREE;
          tree postfix_attrs = NULL_TREE;
          if (!diagnosed_no_specs && !specs->declspecs_seen_p)
            {
              diagnosed_no_specs = true;
              pedwarn (here, 0, "data definition has no type or storage class");
            }
          /* Having seen a data definition, there cannot now be a
             function definition.  */
          fndef_ok = false;
          if (c_parser_next_token_is_keyword (parser, RID_ASM))
            asm_name = c_parser_simple_asm_expr (parser);
          if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
            {
              postfix_attrs = c_parser_gnu_attributes (parser);
              if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
                {
                  /* This means there is an attribute specifier after
                     the declarator in a function definition.  Provide
                     some more information for the user.  */
                  error_at (here, "attributes should be specified before the "
                            "declarator in a function definition");
                  c_parser_skip_to_end_of_block_or_statement (parser);
                  return;
                }
            }
          if (c_parser_next_token_is (parser, CPP_EQ))
            {
              tree d;
              struct c_expr init;
              location_t init_loc;
              c_parser_consume_token (parser);
              if (auto_type_p)
                {
                  /* For __auto_type the initializer must be parsed
                     first, so its type can become the declared type.  */
                  init_loc = c_parser_peek_token (parser)->location;
                  rich_location richloc (line_table, init_loc);
                  start_init (NULL_TREE, asm_name, global_bindings_p (), &richloc);
                  /* A parameter is initialized, which is invalid.  Don't
                     attempt to instrument the initializer.  */
                  int flag_sanitize_save = flag_sanitize;
                  if (nested && !empty_ok)
                    flag_sanitize = 0;
                  init = c_parser_expr_no_commas (parser, NULL);
                  flag_sanitize = flag_sanitize_save;
                  if (TREE_CODE (init.value) == COMPONENT_REF
                      && DECL_C_BIT_FIELD (TREE_OPERAND (init.value, 1)))
                    error_at (here,
                              "%<__auto_type%> used with a bit-field"
                              " initializer");
                  init = convert_lvalue_to_rvalue (init_loc, init, true, true);
                  tree init_type = TREE_TYPE (init.value);
                  /* As with typeof, remove all qualifiers from atomic types.  */
                  if (init_type != error_mark_node && TYPE_ATOMIC (init_type))
                    init_type
                      = c_build_qualified_type (init_type, TYPE_UNQUALIFIED);
                  bool vm_type = variably_modified_type_p (init_type,
                                                           NULL_TREE);
                  if (vm_type)
                    init.value = save_expr (init.value);
                  finish_init ();
                  specs->typespec_kind = ctsk_typeof;
                  specs->locations[cdw_typedef] = init_loc;
                  specs->typedef_p = true;
                  specs->type = init_type;
                  if (vm_type)
                    {
                      bool maybe_const = true;
                      tree type_expr = c_fully_fold (init.value, false,
                                                     &maybe_const);
                      specs->expr_const_operands &= maybe_const;
                      if (specs->expr)
                        specs->expr = build2 (COMPOUND_EXPR,
                                              TREE_TYPE (type_expr),
                                              specs->expr, type_expr);
                      else
                        specs->expr = type_expr;
                    }
                  d = start_decl (declarator, specs, true,
                                  chainon (postfix_attrs, all_prefix_attrs));
                  if (!d)
                    d = error_mark_node;
                  if (omp_declare_simd_clauses.exists ())
                    c_finish_omp_declare_simd (parser, d, NULL_TREE,
                                               omp_declare_simd_clauses);
                }
              else
                {
                  /* The declaration of the variable is in effect while
                     its initializer is parsed.  */
                  d = start_decl (declarator, specs, true,
                                  chainon (postfix_attrs, all_prefix_attrs));
                  if (!d)
                    d = error_mark_node;
                  if (omp_declare_simd_clauses.exists ())
                    c_finish_omp_declare_simd (parser, d, NULL_TREE,
                                               omp_declare_simd_clauses);
                  init_loc = c_parser_peek_token (parser)->location;
                  rich_location richloc (line_table, init_loc);
                  start_init (d, asm_name, global_bindings_p (), &richloc);
                  /* A parameter is initialized, which is invalid.  Don't
                     attempt to instrument the initializer.  */
                  int flag_sanitize_save = flag_sanitize;
                  if (TREE_CODE (d) == PARM_DECL)
                    flag_sanitize = 0;
                  init = c_parser_initializer (parser);
                  flag_sanitize = flag_sanitize_save;
                  finish_init ();
                }
              if (oacc_routine_data)
                c_finish_oacc_routine (oacc_routine_data, d, false);
              if (d != error_mark_node)
                {
                  maybe_warn_string_init (init_loc, TREE_TYPE (d), init);
                  finish_decl (d, init_loc, init.value,
                               init.original_type, asm_name);
                }
            }
          else
            {
              /* No initializer present.  */
              if (auto_type_p)
                {
                  error_at (here,
                            "%<__auto_type%> requires an initialized "
                            "data declaration");
                  c_parser_skip_to_end_of_block_or_statement (parser);
                  return;
                }

              location_t lastloc = UNKNOWN_LOCATION;
              tree attrs = chainon (postfix_attrs, all_prefix_attrs);
              tree d = start_decl (declarator, specs, false, attrs, &lastloc);
              if (d && TREE_CODE (d) == FUNCTION_DECL)
                {
                  /* Find the innermost declarator that is neither cdk_id
                     nor cdk_attrs.  */
                  const struct c_declarator *decl = declarator;
                  const struct c_declarator *last_non_id_attrs = NULL;

                  while (decl)
                    switch (decl->kind)
                      {
                      case cdk_array:
                      case cdk_function:
                      case cdk_pointer:
                        last_non_id_attrs = decl;
                        decl = decl->declarator;
                        break;

                      case cdk_attrs:
                        decl = decl->declarator;
                        break;

                      case cdk_id:
                        decl = 0;
                        break;

                      default:
                        gcc_unreachable ();
                      }

                  /* If it exists and is cdk_function declaration whose
                     arguments have not been set yet, use its arguments.  */
                  if (last_non_id_attrs
                      && last_non_id_attrs->kind == cdk_function)
                    {
                      tree parms = last_non_id_attrs->u.arg_info->parms;
                      if (DECL_ARGUMENTS (d) == NULL_TREE
                          && DECL_INITIAL (d) == NULL_TREE)
                        DECL_ARGUMENTS (d) = parms;

                      warn_parm_array_mismatch (lastloc, d, parms);
                    }
                }
              if (omp_declare_simd_clauses.exists ())
                {
                  tree parms = NULL_TREE;
                  if (d && TREE_CODE (d) == FUNCTION_DECL)
                    {
                      struct c_declarator *ce = declarator;
                      while (ce != NULL)
                        if (ce->kind == cdk_function)
                          {
                            parms = ce->u.arg_info->parms;
                            break;
                          }
                        else
                          ce = ce->declarator;
                    }
                  if (parms)
                    temp_store_parm_decls (d, parms);
                  c_finish_omp_declare_simd (parser, d, parms,
                                             omp_declare_simd_clauses);
                  if (parms)
                    temp_pop_parm_decls ();
                }
              if (oacc_routine_data)
                c_finish_oacc_routine (oacc_routine_data, d, false);
              if (d)
                finish_decl (d, UNKNOWN_LOCATION, NULL_TREE,
                             NULL_TREE, asm_name);

              if (c_parser_next_token_is_keyword (parser, RID_IN))
                {
                  /* Objective-C foreach: hand the loop variable back.  */
                  if (d)
                    *objc_foreach_object_declaration = d;
                  else
                    *objc_foreach_object_declaration = error_mark_node;
                }
            }
          if (c_parser_next_token_is (parser, CPP_COMMA))
            {
              if (auto_type_p)
                {
                  error_at (here,
                            "%<__auto_type%> may only be used with"
                            " a single declarator");
                  c_parser_skip_to_end_of_block_or_statement (parser);
                  return;
                }
              c_parser_consume_token (parser);
              if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
                all_prefix_attrs = chainon (c_parser_gnu_attributes (parser),
                                            prefix_attrs);
              else
                all_prefix_attrs = prefix_attrs;
              continue;
            }
          else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
            {
              c_parser_consume_token (parser);
              return;
            }
          else if (c_parser_next_token_is_keyword (parser, RID_IN))
            {
              /* This can only happen in Objective-C: we found the
                 'in' that terminates the declaration inside an
                 Objective-C foreach statement.  Do not consume the
                 token, so that the caller can use it to determine
                 that this indeed is a foreach context.  */
              return;
            }
          else
            {
              c_parser_error (parser, "expected %<,%> or %<;%>");
              c_parser_skip_to_end_of_block_or_statement (parser);
              return;
            }
        }
      else if (auto_type_p)
        {
          error_at (here,
                    "%<__auto_type%> requires an initialized data declaration");
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      else if (!fndef_ok)
        {
          c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, "
                          "%<asm%> or %<__attribute__%>");
          c_parser_skip_to_end_of_block_or_statement (parser);
          return;
        }
      /* Function definition (nested or otherwise).  */
      if (nested)
        {
          pedwarn (here, OPT_Wpedantic, "ISO C forbids nested functions");
          c_push_function_context ();
        }
      if (!start_function (specs, declarator, all_prefix_attrs))
        {
          /* At this point we've consumed:
               declaration-specifiers declarator
             and the next token isn't CPP_EQ, CPP_COMMA, CPP_SEMICOLON,
             RID_ASM, RID_ATTRIBUTE, or RID_IN,
             but the
               declaration-specifiers declarator
             aren't grokkable as a function definition, so we have
             an error.  */
          gcc_assert (!c_parser_next_token_is (parser, CPP_SEMICOLON));
          if (c_parser_next_token_starts_declspecs (parser))
            {
              /* If we have
                   declaration-specifiers declarator decl-specs
                 then assume we have a missing semicolon, which would
                 give us:
                   declaration-specifiers declarator decl-specs
                                                    ^
                                                    ;
                   <~~~~~~~~~ declaration ~~~~~~~~~~>
                 Use c_parser_require to get an error with a fix-it hint.  */
              c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>");
              parser->error = false;
            }
          else
            {
              /* This can appear in many cases looking nothing like a
                 function definition, so we don't give a more specific
                 error suggesting there was one.  */
              c_parser_error (parser, "expected %<=%>, %<,%>, %<;%>, %<asm%> "
                              "or %<__attribute__%>");
            }
          if (nested)
            c_pop_function_context ();
          break;
        }

      /* Account parsing time to TV_PARSE_INLINE for functions declared
         inline, TV_PARSE_FUNC otherwise.  */
      if (DECL_DECLARED_INLINE_P (current_function_decl))
        tv = TV_PARSE_INLINE;
      else
        tv = TV_PARSE_FUNC;
      auto_timevar at (g_timer, tv);

      /* Parse old-style parameter declarations.  ??? Attributes are
         not allowed to start declaration specifiers here because of a
         syntax conflict between a function declaration with attribute
         suffix and a function definition with an attribute prefix on
         first old-style parameter declaration.  Following the old
         parser, they are not accepted on subsequent old-style
         parameter declarations either.  However, there is no
         ambiguity after the first declaration, nor indeed on the
         first as long as we don't allow postfix attributes after a
         declarator with a nonempty identifier list in a definition;
         and postfix attributes have never been accepted here in
         function definitions either.  */
      while (c_parser_next_token_is_not (parser, CPP_EOF)
             && c_parser_next_token_is_not (parser, CPP_OPEN_BRACE))
        c_parser_declaration_or_fndef (parser, false, false, false,
                                       true, false, NULL, vNULL);
      store_parm_decls ();
      if (omp_declare_simd_clauses.exists ())
        c_finish_omp_declare_simd (parser, current_function_decl, NULL_TREE,
                                   omp_declare_simd_clauses);
      if (oacc_routine_data)
        c_finish_oacc_routine (oacc_routine_data, current_function_decl, true);
      location_t startloc = c_parser_peek_token (parser)->location;
      DECL_STRUCT_FUNCTION (current_function_decl)->function_start_locus
        = startloc;
      location_t endloc = startloc;

      /* If the definition was marked with __RTL, use the RTL parser now,
         consuming the function body.  */
      if (specs->declspec_il == cdil_rtl)
        {
          endloc = c_parser_parse_rtl_body (parser, specs->gimple_or_rtl_pass);

          /* Normally, store_parm_decls sets next_is_function_body,
             anticipating a function body.  We need a push_scope/pop_scope
             pair to flush out this state, or subsequent function parsing
             will go wrong.  */
          push_scope ();
          pop_scope ();

          finish_function (endloc);
          return;
        }
      /* If the definition was marked with __GIMPLE then parse the
         function body as GIMPLE.  */
      else if (specs->declspec_il != cdil_none)
        {
          bool saved = in_late_binary_op;
          in_late_binary_op = true;
          c_parser_parse_gimple_body (parser, specs->gimple_or_rtl_pass,
                                      specs->declspec_il,
                                      specs->entry_bb_count);
          in_late_binary_op = saved;
        }
      else
        fnbody = c_parser_compound_statement (parser, &endloc);
      tree fndecl = current_function_decl;
      if (nested)
        {
          tree decl = current_function_decl;
          /* Mark nested functions as needing static-chain initially.
             lower_nested_functions will recompute it but the
             DECL_STATIC_CHAIN flag is also used before that happens,
             by initializer_constant_valid_p.  See gcc.dg/nested-fn-2.c.  */
          DECL_STATIC_CHAIN (decl) = 1;
          add_stmt (fnbody);
          finish_function (endloc);
          c_pop_function_context ();
          add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl));
        }
      else
        {
          if (fnbody)
            add_stmt (fnbody);
          finish_function (endloc);
        }
      /* Get rid of the empty stmt list for GIMPLE/RTL.  */
      if (specs->declspec_il != cdil_none)
        DECL_SAVED_TREE (fndecl) = NULL_TREE;

      break;
    }
}
/* Parse an asm-definition (asm() outside a function body).  This is a
   GNU extension.

   asm-definition:
     simple-asm-expr ;
*/

static void
c_parser_asm_definition (c_parser *parser)
{
  /* Parse the asm string and, if one was produced, register it as a
     top-level asm with the symbol table.  */
  tree spec = c_parser_simple_asm_expr (parser);
  if (spec != NULL_TREE)
    symtab->finalize_toplevel_asm (spec);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse a static assertion (C11 6.7.10).

   static_assert-declaration:
     static_assert-declaration-no-semi ;
*/

static void
c_parser_static_assert_declaration (c_parser *parser)
{
  c_parser_static_assert_declaration_no_semi (parser);
  /* On a clean parse followed by the required semicolon we are done;
     otherwise recover by skipping to the end of the block or statement.  */
  if (!parser->error
      && c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    return;
  c_parser_skip_to_end_of_block_or_statement (parser);
}
/* Parse a static assertion (C11 6.7.10), without the trailing
   semicolon.

   static_assert-declaration-no-semi:
     _Static_assert ( constant-expression , string-literal )

   C2X:
   static_assert-declaration-no-semi:
     _Static_assert ( constant-expression )
*/

static void
c_parser_static_assert_declaration_no_semi (c_parser *parser)
{
  location_t assert_loc, value_loc;
  tree value;
  tree string = NULL_TREE;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT));
  assert_loc = c_parser_peek_token (parser)->location;
  /* _Static_assert is a C11 feature; warn for earlier standards when
     pedantic.  */
  if (flag_isoc99)
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
                 "ISO C99 does not support %<_Static_assert%>");
  else
    pedwarn_c99 (assert_loc, OPT_Wpedantic,
                 "ISO C90 does not support %<_Static_assert%>");
  c_parser_consume_token (parser);
  matching_parens parens;
  if (!parens.require_open (parser))
    return;
  location_t value_tok_loc = c_parser_peek_token (parser)->location;
  value = c_parser_expr_no_commas (parser, NULL).value;
  value_loc = EXPR_LOC_OR_LOC (value, value_tok_loc);
  /* The message string is optional (C2X); when present it must be a
     string literal of some kind.  */
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      switch (c_parser_peek_token (parser)->type)
        {
        case CPP_STRING:
        case CPP_STRING16:
        case CPP_STRING32:
        case CPP_WSTRING:
        case CPP_UTF8STRING:
          string = c_parser_string_literal (parser, false, true).value;
          break;
        default:
          c_parser_error (parser, "expected string literal");
          return;
        }
    }
  else if (flag_isoc11)
    /* If pedantic for pre-C11, the use of _Static_assert itself will
       have been diagnosed, so do not also diagnose the use of this
       new C2X feature of _Static_assert.  */
    pedwarn_c11 (assert_loc, OPT_Wpedantic,
                 "ISO C11 does not support omitting the string in "
                 "%<_Static_assert%>");
  parens.require_close (parser);

  /* The controlling expression must be an integer constant expression;
     diagnose non-integer, non-constant and (pedantically) merely
     folded-to-constant cases in turn.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (value)))
    {
      error_at (value_loc, "expression in static assertion is not an integer");
      return;
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      value = c_fully_fold (value, false, NULL);
      /* Strip no-op conversions.  */
      STRIP_TYPE_NOPS (value);
      if (TREE_CODE (value) == INTEGER_CST)
        pedwarn (value_loc, OPT_Wpedantic, "expression in static assertion "
                 "is not an integer constant expression");
    }
  if (TREE_CODE (value) != INTEGER_CST)
    {
      error_at (value_loc, "expression in static assertion is not constant");
      return;
    }
  constant_expression_warning (value);

  /* A zero value means the assertion failed; report it, with the user's
     message when one was given.  */
  if (integer_zerop (value))
    {
      if (string)
        error_at (assert_loc, "static assertion failed: %E", string);
      else
        error_at (assert_loc, "static assertion failed");
    }
}
/* Parse some declaration specifiers (possibly none) (C90 6.5, C99
   6.7, C11 6.7), adding them to SPECS (which may already include some).
   Storage class specifiers are accepted iff SCSPEC_OK; type
   specifiers are accepted iff TYPESPEC_OK; alignment specifiers are
   accepted iff ALIGNSPEC_OK; gnu-attributes are accepted at the start
   iff START_ATTR_OK; __auto_type is accepted iff AUTO_TYPE_OK.  In
   addition to the syntax shown, standard attributes are accepted at
   the start iff START_STD_ATTR_OK and at the end iff END_STD_ATTR_OK;
   unlike gnu-attributes, they are not accepted in the middle of the
   list.  (This combines various different syntax productions in the C
   standard, and in some cases gnu-attributes and standard attributes
   at the start may already have been parsed before this function is
   called.)

   declaration-specifiers:
     storage-class-specifier declaration-specifiers[opt]
     type-specifier declaration-specifiers[opt]
     type-qualifier declaration-specifiers[opt]
     function-specifier declaration-specifiers[opt]
     alignment-specifier declaration-specifiers[opt]

   Function specifiers (inline) are from C99, and are currently
   handled as storage class specifiers, as is __thread.  Alignment
   specifiers are from C11.

   C90 6.5.1, C99 6.7.1, C11 6.7.1:
   storage-class-specifier:
     typedef
     extern
     static
     auto
     register
     _Thread_local

   (_Thread_local is new in C11.)

   C99 6.7.4, C11 6.7.4:
   function-specifier:
     inline
     _Noreturn

   (_Noreturn is new in C11.)

   C90 6.5.2, C99 6.7.2, C11 6.7.2:
   type-specifier:
     void
     char
     short
     int
     long
     float
     double
     signed
     unsigned
     _Bool
     _Complex
     [_Imaginary removed in C99 TC2]
     struct-or-union-specifier
     enum-specifier
     typedef-name
     atomic-type-specifier

   (_Bool and _Complex are new in C99.)
   (atomic-type-specifier is new in C11.)

   C90 6.5.3, C99 6.7.3, C11 6.7.3:
   type-qualifier:
     const
     restrict
     volatile
     address-space-qualifier
     _Atomic

   (restrict is new in C99.)
   (_Atomic is new in C11.)

   GNU extensions:

   declaration-specifiers:
     gnu-attributes declaration-specifiers[opt]

   type-qualifier:
     address-space

   address-space:
     identifier recognized by the target

   storage-class-specifier:
     __thread

   type-specifier:
     typeof-specifier
     __auto_type
     __intN
     _Decimal32
     _Decimal64
     _Decimal128
     _Fract
     _Accum
     _Sat

   (_Fract, _Accum, and _Sat are new from ISO/IEC DTR 18037:
   http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1169.pdf)

   atomic-type-specifier
    _Atomic ( type-name )

   Objective-C:

   type-specifier:
     class-name objc-protocol-refs[opt]
     typedef-name objc-protocol-refs
     objc-protocol-refs
*/

void
c_parser_declspecs (c_parser *parser, struct c_declspecs *specs,
                    bool scspec_ok, bool typespec_ok, bool start_attr_ok,
                    bool alignspec_ok, bool auto_type_ok,
                    bool start_std_attr_ok, bool end_std_attr_ok,
                    enum c_lookahead_kind la)
{
  /* gnu-attributes may follow any specifier; ATTRS_OK becomes true once
     the first specifier has been accepted.  */
  bool attrs_ok = start_attr_ok;
  bool seen_type = specs->typespec_kind != ctsk_none;

  if (!typespec_ok)
    gcc_assert (la == cla_prefer_id);

  /* Standard attributes at the very start of the specifier list.  */
  if (start_std_attr_ok
      && c_parser_nth_token_starts_std_attributes (parser, 1))
    {
      gcc_assert (!specs->non_std_attrs_seen_p);
      location_t loc = c_parser_peek_token (parser)->location;
      tree attrs = c_parser_std_attribute_specifier_sequence (parser);
      declspecs_add_attrs (loc, specs, attrs);
      specs->non_std_attrs_seen_p = false;
    }

  /* Consume specifier tokens one at a time until a token that cannot
     continue the declaration-specifiers is seen.  */
  while (c_parser_next_token_is (parser, CPP_NAME)
         || c_parser_next_token_is (parser, CPP_KEYWORD)
         || (c_dialect_objc () && c_parser_next_token_is (parser, CPP_LESS)))
    {
      struct c_typespec t;
      tree attrs;
      tree align;
      location_t loc = c_parser_peek_token (parser)->location;

      /* If we cannot accept a type, exit if the next token must start
         one.  Also, if we already have seen a tagged definition,
         a typename would be an error anyway and likely the user
         has simply forgotten a semicolon, so we exit.  */
      if ((!typespec_ok || specs->typespec_kind == ctsk_tagdef)
          && c_parser_next_tokens_start_typename (parser, la)
          && !c_parser_next_token_is_qualifier (parser)
          && !c_parser_next_token_is_keyword (parser, RID_ALIGNAS))
        break;

      if (c_parser_next_token_is (parser, CPP_NAME))
        {
          c_token *name_token = c_parser_peek_token (parser);
          tree value = name_token->value;
          c_id_kind kind = name_token->id_kind;

          /* Target-recognized address-space identifier.  */
          if (kind == C_ID_ADDRSPACE)
            {
              addr_space_t as
                = name_token->keyword - RID_FIRST_ADDR_SPACE;
              declspecs_add_addrspace (name_token->location, specs, as);
              c_parser_consume_token (parser);
              attrs_ok = true;
              continue;
            }

          gcc_assert (!c_parser_next_token_is_qualifier (parser));

          /* If we cannot accept a type, and the next token must start one,
             exit.  Do the same if we already have seen a tagged definition,
             since it would be an error anyway and likely the user has simply
             forgotten a semicolon.  */
          if (seen_type || !c_parser_next_tokens_start_typename (parser, la))
            break;

          /* Now at an unknown typename (C_ID_ID), a C_ID_TYPENAME or
             a C_ID_CLASSNAME.  */
          c_parser_consume_token (parser);
          seen_type = true;
          attrs_ok = true;
          if (kind == C_ID_ID)
            {
              error_at (loc, "unknown type name %qE", value);
              t.kind = ctsk_typedef;
              t.spec = error_mark_node;
            }
          else if (kind == C_ID_TYPENAME
                   && (!c_dialect_objc ()
                       || c_parser_next_token_is_not (parser, CPP_LESS)))
            {
              t.kind = ctsk_typedef;
              /* For a typedef name, record the meaning, not the name.
                 In case of 'foo foo, bar;'.  */
              t.spec = lookup_name (value);
            }
          else
            {
              /* Objective-C class name, possibly qualified by a
                 protocol reference list.  */
              tree proto = NULL_TREE;
              gcc_assert (c_dialect_objc ());
              t.kind = ctsk_objc;
              if (c_parser_next_token_is (parser, CPP_LESS))
                proto = c_parser_objc_protocol_refs (parser);
              t.spec = objc_get_protocol_qualified_type (value, proto);
            }
          t.expr = NULL_TREE;
          t.expr_const_operands = true;
          declspecs_add_type (name_token->location, specs, t);
          continue;
        }
      if (c_parser_next_token_is (parser, CPP_LESS))
        {
          /* Make "<SomeProtocol>" equivalent to "id <SomeProtocol>" -
             nisse@lysator.liu.se.  */
          tree proto;
          gcc_assert (c_dialect_objc ());
          if (!typespec_ok || seen_type)
            break;
          proto = c_parser_objc_protocol_refs (parser);
          t.kind = ctsk_objc;
          t.spec = objc_get_protocol_qualified_type (NULL_TREE, proto);
          t.expr = NULL_TREE;
          t.expr_const_operands = true;
          declspecs_add_type (loc, specs, t);
          continue;
        }
      gcc_assert (c_parser_next_token_is (parser, CPP_KEYWORD));
      switch (c_parser_peek_token (parser)->keyword)
        {
        case RID_STATIC:
        case RID_EXTERN:
        case RID_REGISTER:
        case RID_TYPEDEF:
        case RID_INLINE:
        case RID_NORETURN:
        case RID_AUTO:
        case RID_THREAD:
          if (!scspec_ok)
            goto out;
          attrs_ok = true;
          /* TODO: Distinguish between function specifiers (inline, noreturn)
             and storage class specifiers, either here or in
             declspecs_add_scspec.  */
          declspecs_add_scspec (loc, specs,
                                c_parser_peek_token (parser)->value);
          c_parser_consume_token (parser);
          break;
        case RID_AUTO_TYPE:
          if (!auto_type_ok)
            goto out;
          /* Fall through.  */
        case RID_UNSIGNED:
        case RID_LONG:
        case RID_SHORT:
        case RID_SIGNED:
        case RID_COMPLEX:
        case RID_INT:
        case RID_CHAR:
        case RID_FLOAT:
        case RID_DOUBLE:
        case RID_VOID:
        case RID_DFLOAT32:
        case RID_DFLOAT64:
        case RID_DFLOAT128:
        CASE_RID_FLOATN_NX:
        case RID_BOOL:
        case RID_FRACT:
        case RID_ACCUM:
        case RID_SAT:
        case RID_INT_N_0:
        case RID_INT_N_1:
        case RID_INT_N_2:
        case RID_INT_N_3:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          if (c_dialect_objc ())
            parser->objc_need_raw_identifier = true;
          t.kind = ctsk_resword;
          t.spec = c_parser_peek_token (parser)->value;
          t.expr = NULL_TREE;
          t.expr_const_operands = true;
          declspecs_add_type (loc, specs, t);
          c_parser_consume_token (parser);
          break;
        case RID_ENUM:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_enum_specifier (parser);
          invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec);
          declspecs_add_type (loc, specs, t);
          break;
        case RID_STRUCT:
        case RID_UNION:
          if (!typespec_ok)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_struct_or_union_specifier (parser);
          invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, t.spec);
          declspecs_add_type (loc, specs, t);
          break;
        case RID_TYPEOF:
          /* ??? The old parser rejected typeof after other type
             specifiers, but is a syntax error the best way of
             handling this?  */
          if (!typespec_ok || seen_type)
            goto out;
          attrs_ok = true;
          seen_type = true;
          t = c_parser_typeof_specifier (parser);
          declspecs_add_type (loc, specs, t);
          break;
        case RID_ATOMIC:
          /* C parser handling of Objective-C constructs needs
             checking for correct lvalue-to-rvalue conversions, and
             the code in build_modify_expr handling various
             Objective-C cases, and that in build_unary_op handling
             Objective-C cases for increment / decrement, also needs
             updating; uses of TYPE_MAIN_VARIANT in objc_compare_types
             and objc_types_are_equivalent may also need updates.  */
          if (c_dialect_objc ())
            sorry ("%<_Atomic%> in Objective-C");
          if (flag_isoc99)
            pedwarn_c99 (loc, OPT_Wpedantic,
                         "ISO C99 does not support the %<_Atomic%> qualifier");
          else
            pedwarn_c99 (loc, OPT_Wpedantic,
                         "ISO C90 does not support the %<_Atomic%> qualifier");
          attrs_ok = true;
          tree value;
          value = c_parser_peek_token (parser)->value;
          c_parser_consume_token (parser);
          if (typespec_ok && c_parser_next_token_is (parser, CPP_OPEN_PAREN))
            {
              /* _Atomic ( type-name ).  */
              seen_type = true;
              c_parser_consume_token (parser);
              struct c_type_name *type = c_parser_type_name (parser);
              t.kind = ctsk_typeof;
              t.spec = error_mark_node;
              t.expr = NULL_TREE;
              t.expr_const_operands = true;
              if (type != NULL)
                t.spec = groktypename (type, &t.expr,
                                       &t.expr_const_operands);
              c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
                                         "expected %<)%>");
              if (t.spec != error_mark_node)
                {
                  /* _Atomic may not qualify arrays, functions or
                     already-qualified types.  */
                  if (TREE_CODE (t.spec) == ARRAY_TYPE)
                    error_at (loc, "%<_Atomic%>-qualified array type");
                  else if (TREE_CODE (t.spec) == FUNCTION_TYPE)
                    error_at (loc, "%<_Atomic%>-qualified function type");
                  else if (TYPE_QUALS (t.spec) != TYPE_UNQUALIFIED)
                    error_at (loc, "%<_Atomic%> applied to a qualified type");
                  else
                    t.spec = c_build_qualified_type (t.spec, TYPE_QUAL_ATOMIC);
                }
              declspecs_add_type (loc, specs, t);
            }
          else
            declspecs_add_qual (loc, specs, value);
          break;
        case RID_CONST:
        case RID_VOLATILE:
        case RID_RESTRICT:
          attrs_ok = true;
          declspecs_add_qual (loc, specs, c_parser_peek_token (parser)->value);
          c_parser_consume_token (parser);
          break;
        case RID_ATTRIBUTE:
          if (!attrs_ok)
            goto out;
          attrs = c_parser_gnu_attributes (parser);
          declspecs_add_attrs (loc, specs, attrs);
          break;
        case RID_ALIGNAS:
          if (!alignspec_ok)
            goto out;
          align = c_parser_alignas_specifier (parser);
          declspecs_add_alignas (loc, specs, align);
          break;
        case RID_GIMPLE:
          if (! flag_gimple)
            error_at (loc, "%<__GIMPLE%> only valid with %<-fgimple%>");
          c_parser_consume_token (parser);
          specs->declspec_il = cdil_gimple;
          specs->locations[cdw_gimple] = loc;
          c_parser_gimple_or_rtl_pass_list (parser, specs);
          break;
        case RID_RTL:
          c_parser_consume_token (parser);
          specs->declspec_il = cdil_rtl;
          specs->locations[cdw_rtl] = loc;
          c_parser_gimple_or_rtl_pass_list (parser, specs);
          break;
        default:
          goto out;
        }
    }
 out:
  /* Standard attributes following the specifier list.  */
  if (end_std_attr_ok
      && c_parser_nth_token_starts_std_attributes (parser, 1))
    specs->postfix_attrs = c_parser_std_attribute_specifier_sequence (parser);
}
/* Parse an enum specifier (C90 6.5.2.2, C99 6.7.2.2, C11 6.7.2.2).
enum-specifier:
enum gnu-attributes[opt] identifier[opt] { enumerator-list }
gnu-attributes[opt]
enum gnu-attributes[opt] identifier[opt] { enumerator-list , }
gnu-attributes[opt]
enum gnu-attributes[opt] identifier
The form with trailing comma is new in C99. The forms with
gnu-attributes are GNU extensions. In GNU C, we accept any expression
without commas in the syntax (assignment expressions, not just
conditional expressions); assignment expressions will be diagnosed
as non-constant.
enumerator-list:
enumerator
enumerator-list , enumerator
enumerator:
enumeration-constant attribute-specifier-sequence[opt]
enumeration-constant attribute-specifier-sequence[opt]
= constant-expression
GNU Extensions:
enumerator:
enumeration-constant attribute-specifier-sequence[opt] gnu-attributes[opt]
enumeration-constant attribute-specifier-sequence[opt] gnu-attributes[opt]
= constant-expression
*/
static struct c_typespec
c_parser_enum_specifier (c_parser *parser)
{
  struct c_typespec ret;
  bool have_std_attrs;
  tree std_attrs = NULL_TREE;
  tree attrs;
  tree ident = NULL_TREE;
  location_t enum_loc;
  location_t ident_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ENUM));
  c_parser_consume_token (parser);
  /* Standard [[...]] attributes, if any, follow the "enum" keyword;
     a GNU __attribute__ list may follow those.  */
  have_std_attrs = c_parser_nth_token_starts_std_attributes (parser, 1);
  if (have_std_attrs)
    std_attrs = c_parser_std_attribute_specifier_sequence (parser);
  attrs = c_parser_gnu_attributes (parser);
  enum_loc = c_parser_peek_token (parser)->location;
  /* Set the location in case we create a decl now.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      enum_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse an enum definition.  */
      struct c_enum_contents the_enum;
      tree type;
      tree postfix_attrs;
      /* We chain the enumerators in reverse order, then put them in
	 forward order at the end.  */
      tree values;
      timevar_push (TV_PARSE_ENUM);
      type = start_enum (enum_loc, &the_enum, ident);
      values = NULL_TREE;
      c_parser_consume_token (parser);
      /* One iteration per enumerator; the loop exits on '}' or on a
	 parse error (in which case VALUES becomes error_mark_node).  */
      while (true)
	{
	  tree enum_id;
	  tree enum_value;
	  tree enum_decl;
	  bool seen_comma;
	  c_token *token;
	  location_t comma_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
	  location_t decl_loc, value_loc;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      /* Give a nicer error for "enum {}".  */
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
		  && !parser->error)
		{
		  error_at (c_parser_peek_token (parser)->location,
			    "empty enum is invalid");
		  parser->error = true;
		}
	      else
		c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	  token = c_parser_peek_token (parser);
	  enum_id = token->value;
	  /* Set the location in case we create a decl now.  */
	  c_parser_set_source_position_from_token (token);
	  decl_loc = value_loc = token->location;
	  c_parser_consume_token (parser);
	  /* Parse any specified attributes.  Standard attributes (if
	     present) precede GNU attributes; both apply to this
	     enumerator.  This STD_ATTRS intentionally shadows the
	     outer one, which belongs to the enum type itself.  */
	  tree std_attrs = NULL_TREE;
	  if (c_parser_nth_token_starts_std_attributes (parser, 1))
	    std_attrs = c_parser_std_attribute_specifier_sequence (parser);
	  tree enum_attrs = chainon (std_attrs,
				     c_parser_gnu_attributes (parser));
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      c_parser_consume_token (parser);
	      value_loc = c_parser_peek_token (parser)->location;
	      enum_value = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  else
	    enum_value = NULL_TREE;  /* Implicit value: previous + 1.  */
	  enum_decl = build_enumerator (decl_loc, value_loc,
					&the_enum, enum_id, enum_value);
	  if (enum_attrs)
	    decl_attributes (&TREE_PURPOSE (enum_decl), enum_attrs, 0);
	  /* Prepend onto VALUES; the list is reversed by nreverse
	     before finish_enum below.  */
	  TREE_CHAIN (enum_decl) = values;
	  values = enum_decl;
	  seen_comma = false;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      comma_loc = c_parser_peek_token (parser)->location;
	      seen_comma = true;
	      c_parser_consume_token (parser);
	    }
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* A trailing comma before '}' is C99; pedwarn for C90.  */
	      if (seen_comma)
		pedwarn_c90 (comma_loc, OPT_Wpedantic,
			     "comma at end of enumerator list");
	      c_parser_consume_token (parser);
	      break;
	    }
	  if (!seen_comma)
	    {
	      c_parser_error (parser, "expected %<,%> or %<}%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
	      values = error_mark_node;
	      break;
	    }
	}
      /* GNU attributes may follow the closing brace.  */
      postfix_attrs = c_parser_gnu_attributes (parser);
      ret.spec = finish_enum (type, nreverse (values),
			      chainon (std_attrs,
				       chainon (attrs, postfix_attrs)));
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      timevar_pop (TV_PARSE_ENUM);
      return ret;
    }
  else if (!ident)
    {
      /* Neither "{" nor an identifier: "enum" alone is a syntax
	 error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* Attributes may only appear when the members are defined or in
     certain forward declarations (treat enum forward declarations in
     GNU C analogously to struct and union forward declarations in
     standard C).  */
  if (have_std_attrs && c_parser_next_token_is_not (parser, CPP_SEMICOLON))
    c_parser_error (parser, "expected %<;%>");
  /* A reference to a (possibly not yet defined) tag.  */
  ret = parser_xref_tag (ident_loc, ENUMERAL_TYPE, ident, have_std_attrs,
			 std_attrs);
  /* In ISO C, enumerated types can be referred to only if already
     defined.  */
  if (pedantic && !COMPLETE_TYPE_P (ret.spec))
    {
      gcc_assert (ident);
      pedwarn (enum_loc, OPT_Wpedantic,
	       "ISO C forbids forward references to %<enum%> types");
    }
  return ret;
}
/* Parse a struct or union specifier (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1).
struct-or-union-specifier:
struct-or-union attribute-specifier-sequence[opt] gnu-attributes[opt]
identifier[opt] { struct-contents } gnu-attributes[opt]
struct-or-union attribute-specifier-sequence[opt] gnu-attributes[opt]
identifier
struct-contents:
struct-declaration-list
struct-declaration-list:
struct-declaration ;
struct-declaration-list struct-declaration ;
GNU extensions:
struct-contents:
empty
struct-declaration
struct-declaration-list struct-declaration
struct-declaration-list:
struct-declaration-list ;
;
(Note that in the syntax here, unlike that in ISO C, the semicolons
are included here rather than in struct-declaration, in order to
describe the syntax with extra semicolons and missing semicolon at
end.)
Objective-C:
struct-declaration-list:
@defs ( class-name )
(Note this does not include a trailing semicolon, but can be
followed by further declarations, and gets a pedwarn-if-pedantic
when followed by a semicolon.) */
static struct c_typespec
c_parser_struct_or_union_specifier (c_parser *parser)
{
  struct c_typespec ret;
  bool have_std_attrs;
  tree std_attrs = NULL_TREE;
  tree attrs;
  tree ident = NULL_TREE;
  location_t struct_loc;
  location_t ident_loc = UNKNOWN_LOCATION;
  enum tree_code code;
  /* Map the keyword to the tree code for the tag being declared.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STRUCT:
      code = RECORD_TYPE;
      break;
    case RID_UNION:
      code = UNION_TYPE;
      break;
    default:
      gcc_unreachable ();
    }
  struct_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Standard [[...]] attributes, then GNU __attribute__, may follow
     the struct/union keyword.  */
  have_std_attrs = c_parser_nth_token_starts_std_attributes (parser, 1);
  if (have_std_attrs)
    std_attrs = c_parser_std_attribute_specifier_sequence (parser);
  attrs = c_parser_gnu_attributes (parser);
  /* Set the location in case we create a decl now.  */
  c_parser_set_source_position_from_token (c_parser_peek_token (parser));
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      ident = c_parser_peek_token (parser)->value;
      ident_loc = c_parser_peek_token (parser)->location;
      struct_loc = ident_loc;
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      /* Parse a struct or union definition.  Start the scope of the
	 tag before parsing components.  */
      class c_struct_parse_info *struct_info;
      tree type = start_struct (struct_loc, code, ident, &struct_info);
      tree postfix_attrs;
      /* We chain the components in reverse order, then put them in
	 forward order at the end.  Each struct-declaration may
	 declare multiple components (comma-separated), so we must use
	 chainon to join them, although when parsing each
	 struct-declaration we can use TREE_CHAIN directly.
	 The theory behind all this is that there will be more
	 semicolon separated fields than comma separated fields, and
	 so we'll be minimizing the number of node traversals required
	 by chainon.  */
      tree contents;
      timevar_push (TV_PARSE_STRUCT);
      contents = NULL_TREE;
      c_parser_consume_token (parser);
      /* Handle the Objective-C @defs construct,
	 e.g. foo(sizeof(struct{ @defs(ClassName) }));.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_DEFS))
	{
	  tree name;
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  matching_parens parens;
	  if (!parens.require_open (parser))
	    goto end_at_defs;
	  if (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME)
	    {
	      name = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      c_parser_error (parser, "expected class name");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      goto end_at_defs;
	    }
	  parens.skip_until_found_close (parser);
	  contents = nreverse (objc_get_class_ivars (name));
	}
    end_at_defs:
      /* Parse the struct-declarations and semicolons.  Problems with
	 semicolons are diagnosed here; empty structures are diagnosed
	 elsewhere.  */
      while (true)
	{
	  tree decls;
	  /* Parse any stray semicolon.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      location_t semicolon_loc
		= c_parser_peek_token (parser)->location;
	      gcc_rich_location richloc (semicolon_loc);
	      /* Offer a fix-it hint removing the extra semicolon.  */
	      richloc.add_fixit_remove ();
	      pedwarn (&richloc, OPT_Wpedantic,
		       "extra semicolon in struct or union specified");
	      c_parser_consume_token (parser);
	      continue;
	    }
	  /* Stop if at the end of the struct or union contents.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  /* Accept #pragmas at struct scope.  */
	  if (c_parser_next_token_is (parser, CPP_PRAGMA))
	    {
	      c_parser_pragma (parser, pragma_struct, NULL);
	      continue;
	    }
	  /* Parse some comma-separated declarations, but not the
	     trailing semicolon if any.  */
	  decls = c_parser_struct_declaration (parser);
	  contents = chainon (decls, contents);
	  /* If no semicolon follows, either we have a parse error or
	     are at the end of the struct or union and should
	     pedwarn.  */
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		pedwarn (c_parser_peek_token (parser)->location, 0,
			 "no semicolon at end of struct or union");
	      else if (parser->error
		       || !c_parser_next_token_starts_declspecs (parser))
		{
		  c_parser_error (parser, "expected %<;%>");
		  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
		  break;
		}
	      /* If we come here, we have already emitted an error
		 for an expected `;', identifier or `(', and we also
		 recovered already.  Go on with the next field.  */
	    }
	}
      /* GNU attributes may follow the closing brace.  */
      postfix_attrs = c_parser_gnu_attributes (parser);
      ret.spec = finish_struct (struct_loc, type, nreverse (contents),
				chainon (std_attrs,
					 chainon (attrs, postfix_attrs)),
				struct_info);
      ret.kind = ctsk_tagdef;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      timevar_pop (TV_PARSE_STRUCT);
      return ret;
    }
  else if (!ident)
    {
      /* Neither "{" nor an identifier after the keyword: syntax
	 error.  */
      c_parser_error (parser, "expected %<{%>");
      ret.spec = error_mark_node;
      ret.kind = ctsk_tagref;
      ret.expr = NULL_TREE;
      ret.expr_const_operands = true;
      return ret;
    }
  /* Attributes may only appear when the members are defined or in
     certain forward declarations.  */
  if (have_std_attrs && c_parser_next_token_is_not (parser, CPP_SEMICOLON))
    c_parser_error (parser, "expected %<;%>");
  /* ??? Existing practice is that GNU attributes are ignored after
     the struct or union keyword when not defining the members.  */
  ret = parser_xref_tag (ident_loc, code, ident, have_std_attrs, std_attrs);
  return ret;
}
/* Parse a struct-declaration (C90 6.5.2.1, C99 6.7.2.1, C11 6.7.2.1),
*without* the trailing semicolon.
struct-declaration:
attribute-specifier-sequence[opt] specifier-qualifier-list
attribute-specifier-sequence[opt] struct-declarator-list
static_assert-declaration-no-semi
specifier-qualifier-list:
type-specifier specifier-qualifier-list[opt]
type-qualifier specifier-qualifier-list[opt]
alignment-specifier specifier-qualifier-list[opt]
gnu-attributes specifier-qualifier-list[opt]
struct-declarator-list:
struct-declarator
struct-declarator-list , gnu-attributes[opt] struct-declarator
struct-declarator:
declarator gnu-attributes[opt]
declarator[opt] : constant-expression gnu-attributes[opt]
GNU extensions:
struct-declaration:
__extension__ struct-declaration
specifier-qualifier-list
Unlike the ISO C syntax, semicolons are handled elsewhere. The use
of gnu-attributes where shown is a GNU extension. In GNU C, we accept
any expression without commas in the syntax (assignment
expressions, not just conditional expressions); assignment
expressions will be diagnosed as non-constant. */
static tree
c_parser_struct_declaration (c_parser *parser)
{
  struct c_declspecs *specs;
  tree prefix_attrs;
  tree all_prefix_attrs;
  tree decls;
  location_t decl_loc;
  /* __extension__ applies to the whole struct-declaration; disable
     extension diagnostics around the recursive parse.  */
  if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
    {
      int ext;
      tree decl;
      ext = disable_extension_diagnostics ();
      c_parser_consume_token (parser);
      decl = c_parser_struct_declaration (parser);
      restore_extension_diagnostics (ext);
      return decl;
    }
  /* _Static_assert declares no member; return NULL_TREE.  */
  if (c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
    {
      c_parser_static_assert_declaration_no_semi (parser);
      return NULL_TREE;
    }
  specs = build_null_declspecs ();
  decl_loc = c_parser_peek_token (parser)->location;
  /* Strictly by the standard, we shouldn't allow _Alignas here,
     but it appears to have been intended to allow it there, so
     we're keeping it as it is until WG14 reaches a conclusion
     of N1731.
     <http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1731.pdf>  */
  c_parser_declspecs (parser, specs, false, true, true,
		      true, false, true, true, cla_nonabstract_decl);
  if (parser->error)
    return NULL_TREE;
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL_TREE;
    }
  finish_declspecs (specs);
  /* A specifier-qualifier-list immediately followed by ';' or '}'
     declares no declarators: either a tag declaration or an
     anonymous struct/union member.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON)
      || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      tree ret;
      if (specs->typespec_kind == ctsk_none)
	{
	  pedwarn (decl_loc, OPT_Wpedantic,
		   "ISO C forbids member declarations with no members");
	  shadow_tag_warned (specs, pedantic);
	  ret = NULL_TREE;
	}
      else
	{
	  /* Support for unnamed structs or unions as members of
	     structs or unions (which is [a] useful and [b] supports
	     MS P-SDK).  */
	  tree attrs = NULL;
	  ret = grokfield (c_parser_peek_token (parser)->location,
			   build_id_declarator (NULL_TREE), specs,
			   NULL_TREE, &attrs);
	  if (ret)
	    decl_attributes (&ret, attrs, 0);
	}
      return ret;
    }
  /* Provide better error recovery.  Note that a type name here is valid,
     and will be treated as a field name.  */
  if (specs->typespec_kind == ctsk_tagdef
      && TREE_CODE (specs->type) != ENUMERAL_TYPE
      && c_parser_next_token_starts_declspecs (parser)
      && !c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected %<;%>, identifier or %<(%>");
      parser->error = false;
      return NULL_TREE;
    }
  pending_xref_error ();
  /* PREFIX_ATTRS are the attributes from the specifier-qualifier-list;
     ALL_PREFIX_ATTRS additionally accumulates gnu-attributes seen
     after each comma, and applies to the following declarator.  */
  prefix_attrs = specs->attrs;
  all_prefix_attrs = prefix_attrs;
  specs->attrs = NULL_TREE;
  decls = NULL_TREE;
  while (true)
    {
      /* Declaring one or more declarators or un-named bit-fields.  */
      struct c_declarator *declarator;
      bool dummy = false;
      if (c_parser_next_token_is (parser, CPP_COLON))
	/* An un-named bit-field: no declarator, just a width.  */
	declarator = build_id_declarator (NULL_TREE);
      else
	declarator = c_parser_declarator (parser,
					  specs->typespec_kind != ctsk_none,
					  C_DTR_NORMAL, &dummy);
      if (declarator == NULL)
	{
	  c_parser_skip_to_end_of_block_or_statement (parser);
	  break;
	}
      if (c_parser_next_token_is (parser, CPP_COLON)
	  || c_parser_next_token_is (parser, CPP_COMMA)
	  || c_parser_next_token_is (parser, CPP_SEMICOLON)
	  || c_parser_next_token_is (parser, CPP_CLOSE_BRACE)
	  || c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  tree postfix_attrs = NULL_TREE;
	  tree width = NULL_TREE;
	  tree d;
	  if (c_parser_next_token_is (parser, CPP_COLON))
	    {
	      /* Bit-field width.  */
	      c_parser_consume_token (parser);
	      width = c_parser_expr_no_commas (parser, NULL).value;
	    }
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    postfix_attrs = c_parser_gnu_attributes (parser);
	  d = grokfield (c_parser_peek_token (parser)->location,
			 declarator, specs, width, &all_prefix_attrs);
	  decl_attributes (&d, chainon (postfix_attrs,
					all_prefix_attrs), 0);
	  /* Chain in reverse; the caller reverses with nreverse.  */
	  DECL_CHAIN (d) = decls;
	  decls = d;
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    all_prefix_attrs = chainon (c_parser_gnu_attributes (parser),
					prefix_attrs);
	  else
	    all_prefix_attrs = prefix_attrs;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (c_parser_next_token_is (parser, CPP_SEMICOLON)
		   || c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    {
	      /* Semicolon consumed in caller.  */
	      break;
	    }
	  else
	    {
	      c_parser_error (parser, "expected %<,%>, %<;%> or %<}%>");
	      break;
	    }
	}
      else
	{
	  c_parser_error (parser,
			  "expected %<:%>, %<,%>, %<;%>, %<}%> or "
			  "%<__attribute__%>");
	  break;
	}
    }
  return decls;
}
/* Parse a typeof specifier (a GNU extension).
typeof-specifier:
typeof ( expression )
typeof ( type-name )
*/
static struct c_typespec
c_parser_typeof_specifier (c_parser *parser)
{
  struct c_typespec ret;
  ret.kind = ctsk_typeof;
  ret.spec = error_mark_node;
  ret.expr = NULL_TREE;
  ret.expr_const_operands = true;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_TYPEOF));
  c_parser_consume_token (parser);
  /* The operand of typeof is unevaluated; both counters must be
     decremented again on every exit path below.  */
  c_inhibit_evaluation_warnings++;
  in_typeof++;
  matching_parens parens;
  if (!parens.require_open (parser))
    {
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      return ret;
    }
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    {
      /* typeof ( type-name ).  */
      struct c_type_name *type = c_parser_type_name (parser);
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (type != NULL)
	{
	  ret.spec = groktypename (type, &ret.expr, &ret.expr_const_operands);
	  pop_maybe_used (variably_modified_type_p (ret.spec, NULL_TREE));
	}
    }
  else
    {
      /* typeof ( expression ).  */
      bool was_vm;
      location_t here = c_parser_peek_token (parser)->location;
      struct c_expr expr = c_parser_expression (parser);
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (here, "%<typeof%> applied to a bit-field");
      mark_exp_read (expr.value);
      ret.spec = TREE_TYPE (expr.value);
      was_vm = variably_modified_type_p (ret.spec, NULL_TREE);
      /* This is returned with the type so that when the type is
	 evaluated, this can be evaluated.  */
      if (was_vm)
	ret.expr = c_fully_fold (expr.value, false, &ret.expr_const_operands);
      pop_maybe_used (was_vm);
      /* For use in macros such as those in <stdatomic.h>, remove all
	 qualifiers from atomic types.  (const can be an issue for more macros
	 using typeof than just the <stdatomic.h> ones.)  */
      if (ret.spec != error_mark_node && TYPE_ATOMIC (ret.spec))
	ret.spec = c_build_qualified_type (ret.spec, TYPE_UNQUALIFIED);
    }
  parens.skip_until_found_close (parser);
  return ret;
}
/* Parse an alignment-specifier.
C11 6.7.5:
alignment-specifier:
_Alignas ( type-name )
_Alignas ( constant-expression )
*/
static tree
c_parser_alignas_specifier (c_parser * parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  /* Until something is successfully parsed, the result is an error.  */
  tree align = error_mark_node;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNAS));
  c_parser_consume_token (parser);
  /* _Alignas is C11; pedwarn for earlier dialects.  */
  if (flag_isoc99)
    pedwarn_c99 (loc, OPT_Wpedantic,
		 "ISO C99 does not support %<_Alignas%>");
  else
    pedwarn_c99 (loc, OPT_Wpedantic,
		 "ISO C90 does not support %<_Alignas%>");
  matching_parens parens;
  if (!parens.require_open (parser))
    return align;
  /* Either _Alignas ( type-name ) or _Alignas ( expression ).  */
  if (!c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    align = c_parser_expr_no_commas (parser, NULL).value;
  else
    {
      struct c_type_name *type = c_parser_type_name (parser);
      if (type != NULL)
	/* The alignment of the named type.  */
	align = c_sizeof_or_alignof_type (loc, groktypename (type, NULL, NULL),
					  false, true, 1);
    }
  parens.skip_until_found_close (parser);
  return align;
}
/* Parse a declarator, possibly an abstract declarator (C90 6.5.4,
6.5.5, C99 6.7.5, 6.7.6, C11 6.7.6, 6.7.7). If TYPE_SEEN_P then
a typedef name may be redeclared; otherwise it may not. KIND
indicates which kind of declarator is wanted. Returns a valid
declarator except in the case of a syntax error in which case NULL is
returned. *SEEN_ID is set to true if an identifier being declared is
seen; this is used to diagnose bad forms of abstract array declarators
and to determine whether an identifier list is syntactically permitted.
declarator:
pointer[opt] direct-declarator
direct-declarator:
identifier
( gnu-attributes[opt] declarator )
direct-declarator array-declarator
direct-declarator ( parameter-type-list )
direct-declarator ( identifier-list[opt] )
pointer:
* type-qualifier-list[opt]
* type-qualifier-list[opt] pointer
type-qualifier-list:
type-qualifier
gnu-attributes
type-qualifier-list type-qualifier
type-qualifier-list gnu-attributes
array-declarator:
[ type-qualifier-list[opt] assignment-expression[opt] ]
[ static type-qualifier-list[opt] assignment-expression ]
[ type-qualifier-list static assignment-expression ]
[ type-qualifier-list[opt] * ]
parameter-type-list:
parameter-list
parameter-list , ...
parameter-list:
parameter-declaration
parameter-list , parameter-declaration
parameter-declaration:
declaration-specifiers declarator gnu-attributes[opt]
declaration-specifiers abstract-declarator[opt] gnu-attributes[opt]
identifier-list:
identifier
identifier-list , identifier
abstract-declarator:
pointer
pointer[opt] direct-abstract-declarator
direct-abstract-declarator:
( gnu-attributes[opt] abstract-declarator )
direct-abstract-declarator[opt] array-declarator
direct-abstract-declarator[opt] ( parameter-type-list[opt] )
GNU extensions:
direct-declarator:
direct-declarator ( parameter-forward-declarations
parameter-type-list[opt] )
direct-abstract-declarator:
direct-abstract-declarator[opt] ( parameter-forward-declarations
parameter-type-list[opt] )
parameter-forward-declarations:
parameter-list ;
parameter-forward-declarations parameter-list ;
The uses of gnu-attributes shown above are GNU extensions.
Some forms of array declarator are not included in C99 in the
syntax for abstract declarators; these are disallowed elsewhere.
This may be a defect (DR#289).
This function also accepts an omitted abstract declarator as being
an abstract declarator, although not part of the formal syntax. */
struct c_declarator *
c_parser_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
		     bool *seen_id)
{
  /* Without a leading '*' there is no pointer part: what remains is a
     direct declarator, a direct abstract declarator, or nothing (which
     counts as a direct abstract declarator here).  */
  if (!c_parser_next_token_is (parser, CPP_MULT))
    return c_parser_direct_declarator (parser, type_seen_p, kind, seen_id);

  /* "* type-qualifier-list[opt] declarator": gather the qualifiers and
     gnu-attributes applying to this pointer, then recurse for the
     declarator the pointer wraps.  */
  struct c_declspecs *ptr_quals = build_null_declspecs ();
  c_parser_consume_token (parser);
  c_parser_declspecs (parser, ptr_quals, false, false, true,
		      false, false, true, false, cla_prefer_id);
  struct c_declarator *pointee
    = c_parser_declarator (parser, type_seen_p, kind, seen_id);
  /* Propagate a syntax error; otherwise wrap the result in a pointer
     declarator carrying the collected qualifiers.  */
  return pointee == NULL ? NULL : make_pointer_declarator (ptr_quals, pointee);
}
/* Parse a direct declarator or direct abstract declarator; arguments
as c_parser_declarator. */
static struct c_declarator *
c_parser_direct_declarator (c_parser *parser, bool type_seen_p, c_dtr_syn kind,
			    bool *seen_id)
{
  /* The direct declarator must start with an identifier (possibly
     omitted) or a parenthesized declarator (possibly abstract).  In
     an ordinary declarator, initial parentheses must start a
     parenthesized declarator.  In an abstract declarator or parameter
     declarator, they could start a parenthesized declarator or a
     parameter list.  To tell which, the open parenthesis and any
     following gnu-attributes must be read.  If a declaration
     specifier or standard attributes follow, then it is a parameter
     list; if the specifier is a typedef name, there might be an
     ambiguity about redeclaring it, which is resolved in the
     direction of treating it as a typedef name.  If a close
     parenthesis follows, it is also an empty parameter list, as the
     syntax does not permit empty abstract declarators.  Otherwise, it
     is a parenthesized declarator (in which case the analysis may be
     repeated inside it, recursively).
     ??? There is an ambiguity in a parameter declaration "int
     (__attribute__((foo)) x)", where x is not a typedef name: it
     could be an abstract declarator for a function, or declare x with
     parentheses.  The proper resolution of this ambiguity needs
     documenting.  At present we follow an accident of the old
     parser's implementation, whereby the first parameter must have
     some declaration specifiers other than just gnu-attributes.  Thus as
     a parameter declaration it is treated as a parenthesized
     parameter named x, and as an abstract declarator it is
     rejected.
     ??? Also following the old parser, gnu-attributes inside an empty
     parameter list are ignored, making it a list not yielding a
     prototype, rather than giving an error or making it have one
     parameter with implicit type int.
     ??? Also following the old parser, typedef names may be
     redeclared in declarators, but not Objective-C class names.  */
  /* Case 1: the declarator starts with an identifier.  */
  if (kind != C_DTR_ABSTRACT
      && c_parser_next_token_is (parser, CPP_NAME)
      && ((type_seen_p
	   && (c_parser_peek_token (parser)->id_kind == C_ID_TYPENAME
	       || c_parser_peek_token (parser)->id_kind == C_ID_CLASSNAME))
	  || c_parser_peek_token (parser)->id_kind == C_ID_ID))
    {
      struct c_declarator *inner
	= build_id_declarator (c_parser_peek_token (parser)->value);
      *seen_id = true;
      inner->id_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      /* Standard attributes following the identifier attach to it.  */
      if (c_parser_nth_token_starts_std_attributes (parser, 1))
	inner->u.id.attrs = c_parser_std_attribute_specifier_sequence (parser);
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* Case 2: an abstract declarator starting directly with an array
     declarator ('[' not beginning standard attributes).  */
  if (kind != C_DTR_NORMAL
      && c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
      && !c_parser_nth_token_starts_std_attributes (parser, 1))
    {
      struct c_declarator *inner = build_id_declarator (NULL_TREE);
      inner->id_loc = c_parser_peek_token (parser)->location;
      return c_parser_direct_declarator_inner (parser, *seen_id, inner);
    }
  /* Either we are at the end of an abstract declarator, or we have
     parentheses.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      tree attrs;
      struct c_declarator *inner;
      c_parser_consume_token (parser);
      bool have_gnu_attrs = c_parser_next_token_is_keyword (parser,
							    RID_ATTRIBUTE);
      attrs = c_parser_gnu_attributes (parser);
      /* Decide between a parameter list and a parenthesized
	 declarator, per the big comment above.  */
      if (kind != C_DTR_NORMAL
	  && (c_parser_next_token_starts_declspecs (parser)
	      || (!have_gnu_attrs
		  && c_parser_nth_token_starts_std_attributes (parser, 1))
	      || c_parser_next_token_is (parser, CPP_CLOSE_PAREN)))
	{
	  struct c_arg_info *args
	    = c_parser_parms_declarator (parser, kind == C_DTR_NORMAL,
					 attrs, have_gnu_attrs);
	  if (args == NULL)
	    return NULL;
	  else
	    {
	      inner = build_id_declarator (NULL_TREE);
	      /* Standard attributes after the parameter list apply to
		 the function type, but not after an old-style
		 identifier list.  */
	      if (!(args->types
		    && args->types != error_mark_node
		    && TREE_CODE (TREE_VALUE (args->types)) == IDENTIFIER_NODE)
		  && c_parser_nth_token_starts_std_attributes (parser, 1))
		{
		  tree std_attrs
		    = c_parser_std_attribute_specifier_sequence (parser);
		  if (std_attrs)
		    inner = build_attrs_declarator (std_attrs, inner);
		}
	      inner = build_function_declarator (args, inner);
	      return c_parser_direct_declarator_inner (parser, *seen_id,
						       inner);
	    }
	}
      /* A parenthesized declarator.  */
      inner = c_parser_declarator (parser, type_seen_p, kind, seen_id);
      if (inner != NULL && attrs != NULL)
	inner = build_attrs_declarator (attrs, inner);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (inner == NULL)
	    return NULL;
	  else
	    return c_parser_direct_declarator_inner (parser, *seen_id, inner);
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  else
    {
      /* No identifier and no parentheses: an error for a normal
	 declarator, an empty abstract declarator otherwise.  */
      if (kind == C_DTR_NORMAL)
	{
	  c_parser_error (parser, "expected identifier or %<(%>");
	  return NULL;
	}
      else
	return build_id_declarator (NULL_TREE);
    }
}
/* Parse part of a direct declarator or direct abstract declarator,
given that some (in INNER) has already been parsed; ID_PRESENT is
true if an identifier is present, false for an abstract
declarator. */
static struct c_declarator *
c_parser_direct_declarator_inner (c_parser *parser, bool id_present,
				  struct c_declarator *inner)
{
  /* Parse a sequence of array declarators and parameter lists.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
      && !c_parser_nth_token_starts_std_attributes (parser, 1))
    {
      /* An array declarator: [ quals/static/* dimension ].  */
      location_t brace_loc = c_parser_peek_token (parser)->location;
      struct c_declarator *declarator;
      struct c_declspecs *quals_attrs = build_null_declspecs ();
      bool static_seen;
      bool star_seen;
      struct c_expr dimen;
      dimen.value = NULL_TREE;
      dimen.original_code = ERROR_MARK;
      dimen.original_type = NULL_TREE;
      c_parser_consume_token (parser);
      /* Type qualifiers may precede or follow "static" inside the
	 brackets; parse any that come first.  */
      c_parser_declspecs (parser, quals_attrs, false, false, true,
			  false, false, false, false, cla_prefer_id);
      static_seen = c_parser_next_token_is_keyword (parser, RID_STATIC);
      if (static_seen)
	c_parser_consume_token (parser);
      /* "[static quals expr]": qualifiers may also follow "static",
	 but only if none preceded it.  */
      if (static_seen && !quals_attrs->declspecs_seen_p)
	c_parser_declspecs (parser, quals_attrs, false, false, true,
			    false, false, false, false, cla_prefer_id);
      if (!quals_attrs->declspecs_seen_p)
	quals_attrs = NULL;
      /* If "static" is present, there must be an array dimension.
	 Otherwise, there may be a dimension, "*", or no
	 dimension.  */
      if (static_seen)
	{
	  star_seen = false;
	  dimen = c_parser_expr_no_commas (parser, NULL);
	}
      else
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	    {
	      /* "[]": no dimension.  */
	      dimen.value = NULL_TREE;
	      star_seen = false;
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* "[*]" means a VLA of unspecified size, but only when
		 '*' is immediately followed by ']'; otherwise '*' is
		 the start of a dimension expression.  */
	      if (c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_SQUARE)
		{
		  dimen.value = NULL_TREE;
		  star_seen = true;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  star_seen = false;
		  dimen = c_parser_expr_no_commas (parser, NULL);
		}
	    }
	  else
	    {
	      star_seen = false;
	      dimen = c_parser_expr_no_commas (parser, NULL);
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  return NULL;
	}
      if (dimen.value)
	dimen = convert_lvalue_to_rvalue (brace_loc, dimen, true, true);
      declarator = build_array_declarator (brace_loc, dimen.value, quals_attrs,
					   static_seen, star_seen);
      if (declarator == NULL)
	return NULL;
      /* Standard attributes after "]" apply to the array type.  */
      if (c_parser_nth_token_starts_std_attributes (parser, 1))
	{
	  tree std_attrs
	    = c_parser_std_attribute_specifier_sequence (parser);
	  if (std_attrs)
	    inner = build_attrs_declarator (std_attrs, inner);
	}
      inner = set_array_declarator_inner (declarator, inner);
      /* Recurse: more array declarators or parameter lists may
	 follow.  */
      return c_parser_direct_declarator_inner (parser, id_present, inner);
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* A parameter list, making this a function declarator.  */
      tree attrs;
      struct c_arg_info *args;
      c_parser_consume_token (parser);
      bool have_gnu_attrs = c_parser_next_token_is_keyword (parser,
							    RID_ATTRIBUTE);
      attrs = c_parser_gnu_attributes (parser);
      args = c_parser_parms_declarator (parser, id_present, attrs,
					have_gnu_attrs);
      if (args == NULL)
	return NULL;
      else
	{
	  /* Standard attributes after ")" apply to the function type,
	     but not after an old-style identifier list.  */
	  if (!(args->types
		&& args->types != error_mark_node
		&& TREE_CODE (TREE_VALUE (args->types)) == IDENTIFIER_NODE)
	      && c_parser_nth_token_starts_std_attributes (parser, 1))
	    {
	      tree std_attrs
		= c_parser_std_attribute_specifier_sequence (parser);
	      if (std_attrs)
		inner = build_attrs_declarator (std_attrs, inner);
	    }
	  inner = build_function_declarator (args, inner);
	  return c_parser_direct_declarator_inner (parser, id_present, inner);
	}
    }
  /* Neither '[' nor '(': the declarator is complete.  */
  return inner;
}
/* Parse a parameter list or identifier list, including the closing
parenthesis but not the opening one. ATTRS are the gnu-attributes
at the start of the list. ID_LIST_OK is true if an identifier list
is acceptable; such a list must not have attributes at the start.
HAVE_GNU_ATTRS says whether any gnu-attributes (including empty
attributes) were present (in which case standard attributes cannot
occur). */
static struct c_arg_info *
c_parser_parms_declarator (c_parser *parser, bool id_list_ok, tree attrs,
			   bool have_gnu_attrs)
{
  /* Parameters get their own scope for the duration of the list.  Every
     return path below must pop it.  */
  push_scope ();
  declare_parm_level ();
  /* If the list starts with an identifier, it is an identifier list.
     Otherwise, it is either a prototype list or an empty list.  */
  if (id_list_ok
      && !attrs
      && c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_token (parser)->id_kind == C_ID_ID
      /* Look ahead to detect typos in type names.  */
      && c_parser_peek_2nd_token (parser)->type != CPP_NAME
      && c_parser_peek_2nd_token (parser)->type != CPP_MULT
      && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN
      && c_parser_peek_2nd_token (parser)->type != CPP_OPEN_SQUARE
      && c_parser_peek_2nd_token (parser)->type != CPP_KEYWORD)
    {
      /* Old-style (K&R) identifier list.  Collect the names into a
	 TREE_LIST, appending through NEXTP so they stay in source
	 order.  */
      tree list = NULL_TREE, *nextp = &list;
      while (c_parser_next_token_is (parser, CPP_NAME)
	     && c_parser_peek_token (parser)->id_kind == C_ID_ID)
	{
	  *nextp = build_tree_list (NULL_TREE,
				    c_parser_peek_token (parser)->value);
	  nextp = & TREE_CHAIN (*nextp);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_COMMA))
	    break;
	  c_parser_consume_token (parser);
	  /* A comma directly followed by ')' is a trailing comma:
	     diagnose it but keep the identifiers seen so far.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  struct c_arg_info *ret = build_arg_info ();
	  ret->types = list;
	  c_parser_consume_token (parser);
	  pop_scope ();
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  pop_scope ();
	  return NULL;
	}
    }
  else
    {
      /* Prototype list (or empty list): delegate to the full
	 parameter-list parser.  */
      struct c_arg_info *ret
	= c_parser_parms_list_declarator (parser, attrs, NULL, have_gnu_attrs);
      pop_scope ();
      return ret;
    }
}
/* Parse a parameter list (possibly empty), including the closing
parenthesis but not the opening one. ATTRS are the gnu-attributes
at the start of the list; if HAVE_GNU_ATTRS, there were some such
attributes (possibly empty, in which case ATTRS is NULL_TREE),
which means standard attributes cannot start the list. EXPR is
NULL or an expression that needs to be evaluated for the side
effects of array size expressions in the parameters. */
static struct c_arg_info *
c_parser_parms_list_declarator (c_parser *parser, tree attrs, tree expr,
				bool have_gnu_attrs)
{
  /* Set when a parameter fails to parse; parsing continues so that the
     remaining parameters are still diagnosed, but NULL is returned.  */
  bool bad_parm = false;
  /* ??? Following the old parser, forward parameter declarations may
     use abstract declarators, and if no real parameter declarations
     follow the forward declarations then this is not diagnosed.  Also
     note as above that gnu-attributes are ignored as the only contents of
     the parentheses, or as the only contents after forward
     declarations.  */
  /* Empty list: "()" — an unprototyped function.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      struct c_arg_info *ret = build_arg_info ();
      c_parser_consume_token (parser);
      return ret;
    }
  /* "(...)" — variadic with no named parameters, a constraint
     violation in ISO C unless explicitly allowed.  */
  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
    {
      struct c_arg_info *ret = build_arg_info ();
      if (flag_allow_parameterless_variadic_functions)
	{
	  /* F (...) is allowed.  */
	  ret->types = NULL_TREE;
	}
      else
	{
	  /* Suppress -Wold-style-definition for this case.  */
	  ret->types = error_mark_node;
	  error_at (c_parser_peek_token (parser)->location,
		    "ISO C requires a named argument before %<...%>");
	}
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  return ret;
	}
      else
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return NULL;
	}
    }
  /* Nonempty list of parameters, either terminated with semicolon
     (forward declarations; recurse) or with close parenthesis (normal
     function) or with ", ... )" (variadic function).  */
  while (true)
    {
      /* Parse a parameter.  */
      struct c_parm *parm = c_parser_parameter_declaration (parser, attrs,
							    have_gnu_attrs);
      /* The incoming attributes apply only to the first parameter.  */
      attrs = NULL_TREE;
      have_gnu_attrs = false;
      if (parm == NULL)
	bad_parm = true;
      else
	push_parm_decl (parm, &expr);
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Forward parameter declarations (a GNU extension): mark
	     them and recurse for the real parameter list.  */
	  tree new_attrs;
	  c_parser_consume_token (parser);
	  mark_forward_parm_decls ();
	  bool new_have_gnu_attrs
	    = c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE);
	  new_attrs = c_parser_gnu_attributes (parser);
	  return c_parser_parms_list_declarator (parser, new_attrs, expr,
						 new_have_gnu_attrs);
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  if (bad_parm)
	    return NULL;
	  else
	    return get_parm_info (false, expr);
	}
      if (!c_parser_require (parser, CPP_COMMA,
			     "expected %<;%>, %<,%> or %<)%>",
			     UNKNOWN_LOCATION, false))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL;
	}
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* ", ..." must be immediately followed by the closing
	     parenthesis.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      c_parser_consume_token (parser);
	      if (bad_parm)
		return NULL;
	      else
		return get_parm_info (true, expr);
	    }
	  else
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return NULL;
	    }
	}
    }
}
/* Parse a parameter declaration. ATTRS are the gnu-attributes at the
start of the declaration if it is the first parameter;
HAVE_GNU_ATTRS is true if there were any gnu-attributes there (even
empty) there. */
static struct c_parm *
c_parser_parameter_declaration (c_parser *parser, tree attrs,
				bool have_gnu_attrs)
{
  struct c_declspecs *specs;
  struct c_declarator *declarator;
  tree prefix_attrs;
  tree postfix_attrs = NULL_TREE;
  bool dummy = false;
  /* Accept #pragmas between parameter declarations.  */
  while (c_parser_next_token_is (parser, CPP_PRAGMA))
    c_parser_pragma (parser, pragma_param, NULL);
  /* If the next tokens cannot start declaration specifiers or a
     standard attribute, this is not a parameter declaration: diagnose
     (with a fix-it when it looks like a misspelled type name) and
     skip to the end of the parameter.  */
  if (!c_parser_next_token_starts_declspecs (parser)
      && !c_parser_nth_token_starts_std_attributes (parser, 1))
    {
      c_token *token = c_parser_peek_token (parser);
      if (parser->error)
	return NULL;
      c_parser_set_source_position_from_token (token);
      if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
	{
	  auto_diagnostic_group d;
	  name_hint hint = lookup_name_fuzzy (token->value,
					      FUZZY_LOOKUP_TYPENAME,
					      token->location);
	  if (const char *suggestion = hint.suggestion ())
	    {
	      gcc_rich_location richloc (token->location);
	      richloc.add_fixit_replace (suggestion);
	      error_at (&richloc,
			"unknown type name %qE; did you mean %qs?",
			token->value, suggestion);
	    }
	  else
	    error_at (token->location, "unknown type name %qE", token->value);
	  parser->error = true;
	}
      /* ??? In some Objective-C cases '...' isn't applicable so there
	 should be a different message.  */
      else
	c_parser_error (parser,
			"expected declaration specifiers or %<...%>");
      c_parser_skip_to_end_of_parameter (parser);
      return NULL;
    }
  location_t start_loc = c_parser_peek_token (parser)->location;
  specs = build_null_declspecs ();
  /* Fold any attributes passed down from the parameter list (they
     preceded the first parameter) into the declspecs.  */
  if (attrs)
    {
      declspecs_add_attrs (input_location, specs, attrs);
      attrs = NULL_TREE;
    }
  c_parser_declspecs (parser, specs, true, true, true, true, false,
		      !have_gnu_attrs, true, cla_nonabstract_decl);
  finish_declspecs (specs);
  pending_xref_error ();
  /* Detach the declspec attributes so they can be rechained after any
     postfix attributes below.  */
  prefix_attrs = specs->attrs;
  specs->attrs = NULL_TREE;
  declarator = c_parser_declarator (parser,
				    specs->typespec_kind != ctsk_none,
				    C_DTR_PARM, &dummy);
  if (declarator == NULL)
    {
      c_parser_skip_until_found (parser, CPP_COMMA, NULL);
      return NULL;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    postfix_attrs = c_parser_gnu_attributes (parser);
  /* Generate a location for the parameter, ranging from the start of the
     initial token to the end of the final token.
     If we have a identifier, then use it for the caret location, e.g.
     extern int callee (int one, int (*two)(int, int), float three);
                                 ~~~~~~^~~~~~~~~~~~~~
     otherwise, reuse the start location for the caret location e.g.:
     extern int callee (int one, int (*)(int, int), float three);
                                 ^~~~~~~~~~~~~~~~~
  */
  location_t end_loc = parser->last_token_location;
  /* Find any cdk_id declarator; determine if we have an identifier.  */
  c_declarator *id_declarator = declarator;
  while (id_declarator && id_declarator->kind != cdk_id)
    id_declarator = id_declarator->declarator;
  location_t caret_loc = (id_declarator->u.id.id
			  ? id_declarator->id_loc
			  : start_loc);
  location_t param_loc = make_location (caret_loc, start_loc, end_loc);
  return build_c_parm (specs, chainon (postfix_attrs, prefix_attrs),
		       declarator, param_loc);
}
/* Parse a string literal in an asm expression. It should not be
translated, and wide string literals are an error although
permitted by the syntax. This is a GNU extension.
asm-string-literal:
string-literal
*/
static tree
c_parser_asm_string_literal (c_parser *parser)
{
  /* Overlength-string warnings are not meaningful for asm strings, so
     suppress them around the parse and restore the flag afterwards.  */
  int saved_warn = warn_overlength_strings;
  warn_overlength_strings = 0;
  /* Parse untranslated, with wide strings rejected (both flags false).  */
  tree result = c_parser_string_literal (parser, false, false).value;
  warn_overlength_strings = saved_warn;
  return result;
}
/* Parse a simple asm expression. This is used in restricted
contexts, where a full expression with inputs and outputs does not
make sense. This is a GNU extension.
simple-asm-expr:
asm ( asm-string-literal )
*/
static tree
c_parser_simple_asm_expr (c_parser *parser)
{
  /* Caller guarantees we are positioned at the `asm' keyword.  */
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);
  matching_parens parens;
  if (!parens.require_open (parser))
    return NULL_TREE;
  tree str = c_parser_asm_string_literal (parser);
  if (parens.require_close (parser))
    return str;
  /* Unbalanced parentheses: resynchronize and report failure.  */
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  return NULL_TREE;
}
/* Peek at the next token and, if it is an identifier or one of the
   keywords acceptable inside gnu-attributes, return the corresponding
   identifier tree; otherwise return NULL_TREE.  The token is not
   consumed.  */
static tree
c_parser_gnu_attribute_any_word (c_parser *parser)
{
  /* A plain identifier (including one declared as a type) is always
     acceptable.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    return c_parser_peek_token (parser)->value;
  if (!c_parser_next_token_is (parser, CPP_KEYWORD))
    return NULL_TREE;
  /* ??? See comment above about what keywords are accepted here.  */
  switch (c_parser_peek_token (parser)->keyword)
    {
    case RID_STATIC:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_EXTERN:
    case RID_REGISTER:
    case RID_TYPEDEF:
    case RID_SHORT:
    case RID_INLINE:
    case RID_NORETURN:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_AUTO:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_THREAD:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    case RID_VOID:
    case RID_DFLOAT32:
    case RID_DFLOAT64:
    case RID_DFLOAT128:
    CASE_RID_FLOATN_NX:
    case RID_BOOL:
    case RID_FRACT:
    case RID_ACCUM:
    case RID_SAT:
    case RID_TRANSACTION_ATOMIC:
    case RID_TRANSACTION_CANCEL:
    case RID_ATOMIC:
    case RID_AUTO_TYPE:
    case RID_INT_N_0:
    case RID_INT_N_1:
    case RID_INT_N_2:
    case RID_INT_N_3:
      /* Accept __attribute__((__const)) as __attribute__((const)) etc.  */
      return ridpointers[(int) c_parser_peek_token (parser)->keyword];
    default:
      return NULL_TREE;
    }
}
/* Parse attribute arguments. This is a common form of syntax
covering all currently valid GNU and standard attributes.
gnu-attribute-arguments:
identifier
identifier , nonempty-expr-list
expr-list
where the "identifier" must not be declared as a type. ??? Why not
allow identifiers declared as types to start the arguments? */
static tree
c_parser_attribute_arguments (c_parser *parser, bool takes_identifier,
			      bool require_string, bool allow_empty_args)
{
  vec<tree, va_gc> *expr_list;
  tree attr_args;
  /* Parse the attribute contents.  If they start with an
     identifier which is followed by a comma or close
     parenthesis, then the arguments start with that
     identifier; otherwise they are an expression list.
     In objective-c the identifier may be a classname.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && (c_parser_peek_token (parser)->id_kind == C_ID_ID
	  || (c_dialect_objc ()
	      && c_parser_peek_token (parser)->id_kind
	      == C_ID_CLASSNAME))
      && ((c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
	  || (c_parser_peek_2nd_token (parser)->type
	      == CPP_CLOSE_PAREN))
      && (takes_identifier
	  || (c_dialect_objc ()
	      && c_parser_peek_token (parser)->id_kind
	      == C_ID_CLASSNAME)))
    {
      /* Keep the leading identifier as an IDENTIFIER_NODE rather than
	 parsing it as an expression.  */
      tree arg1 = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	attr_args = build_tree_list (NULL_TREE, arg1);
      else
	{
	  tree tree_list;
	  /* Consume the comma; the remaining arguments form an
	     expression list chained after the identifier.  */
	  c_parser_consume_token (parser);
	  expr_list = c_parser_expr_list (parser, false, true,
					  NULL, NULL, NULL, NULL);
	  tree_list = build_tree_list_vec (expr_list);
	  attr_args = tree_cons (NULL_TREE, arg1, tree_list);
	  release_tree_vector (expr_list);
	}
    }
  else
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  /* Empty argument list "()": allowed only for gnu-attributes,
	     not for standard attributes.  */
	  if (!allow_empty_args)
	    error_at (c_parser_peek_token (parser)->location,
		      "parentheses must be omitted if "
		      "attribute argument list is empty");
	  attr_args = NULL_TREE;
	}
      else if (require_string)
	{
	  /* The only valid argument for this attribute is a string
	     literal.  Handle this specially here to avoid accepting
	     string literals with excess parentheses.  */
	  tree string = c_parser_string_literal (parser, false, true).value;
	  attr_args = build_tree_list (NULL_TREE, string);
	}
      else
	{
	  /* General case: a (nonempty) expression list.  */
	  expr_list = c_parser_expr_list (parser, false, true,
					  NULL, NULL, NULL, NULL);
	  attr_args = build_tree_list_vec (expr_list);
	  release_tree_vector (expr_list);
	}
    }
  return attr_args;
}
/* Parse (possibly empty) gnu-attributes. This is a GNU extension.
gnu-attributes:
empty
gnu-attributes gnu-attribute
gnu-attribute:
__attribute__ ( ( gnu-attribute-list ) )
gnu-attribute-list:
gnu-attrib
gnu-attribute_list , gnu-attrib
gnu-attrib:
empty
any-word
any-word ( gnu-attribute-arguments )
where "any-word" may be any identifier (including one declared as a
type), a reserved word storage class specifier, type specifier or
type qualifier. ??? This still leaves out most reserved keywords
(following the old parser), shouldn't we include them?
When EXPECT_COMMA is true, expect the attribute to be preceded
by a comma and fail if it isn't.
When EMPTY_OK is true, allow and consume any number of consecutive
commas with no attributes in between. */
static tree
c_parser_gnu_attribute (c_parser *parser, tree attrs,
			bool expect_comma = false, bool empty_ok = true)
{
  /* Return convention: NULL_TREE when no attribute starts here,
     error_mark_node on a parse error, otherwise ATTRS with the new
     attribute chained on the end.  */
  bool comma_first = c_parser_next_token_is (parser, CPP_COMMA);
  if (!comma_first
      && !c_parser_next_token_is (parser, CPP_NAME)
      && !c_parser_next_token_is (parser, CPP_KEYWORD))
    return NULL_TREE;
  /* Consume commas; consecutive commas denote empty attributes, which
     are only permitted when EMPTY_OK.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      if (!empty_ok)
	return attrs;
    }
  tree attr_name = c_parser_gnu_attribute_any_word (parser);
  if (attr_name == NULL_TREE)
    return NULL_TREE;
  attr_name = canonicalize_attr_name (attr_name);
  c_parser_consume_token (parser);
  tree attr;
  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
    {
      /* Attribute without an argument list.  */
      if (expect_comma && !comma_first)
	{
	  /* A comma is missing between the last attribute on the chain
	     and this one.  */
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return error_mark_node;
	}
      attr = build_tree_list (attr_name, NULL_TREE);
      /* Add this attribute to the list.  */
      attrs = chainon (attrs, attr);
      return attrs;
    }
  /* Attribute with arguments: consume the '(' and parse them.  */
  c_parser_consume_token (parser);
  tree attr_args
    = c_parser_attribute_arguments (parser,
				    attribute_takes_identifier_p (attr_name),
				    false, true);
  attr = build_tree_list (attr_name, attr_args);
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    c_parser_consume_token (parser);
  else
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				 "expected %<)%>");
      return error_mark_node;
    }
  if (expect_comma && !comma_first)
    {
      /* A comma is missing between the last attribute on the chain
	 and this one.  */
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				 "expected %<)%>");
      return error_mark_node;
    }
  /* Add this attribute to the list.  */
  attrs = chainon (attrs, attr);
  return attrs;
}
/* Parse (possibly empty) gnu-attributes:
     gnu-attributes:
       empty
       gnu-attributes gnu-attribute
   where each gnu-attribute is  __attribute__ (( gnu-attribute-list )).
   String arguments inside an attribute are not translated; the
   translate_strings_p flag is saved and restored around each
   attribute, including on every error path.  Returns the chain of
   attributes parsed so far.  */
static tree
c_parser_gnu_attributes (c_parser *parser)
{
  tree attrs = NULL_TREE;
  while (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    {
      bool save_translate_strings_p = parser->translate_strings_p;
      parser->translate_strings_p = false;
      /* Consume the `__attribute__' keyword.  */
      c_parser_consume_token (parser);
      /* Look for the two `(' tokens.  */
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->translate_strings_p = save_translate_strings_p;
	  return attrs;
	}
      if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	{
	  parser->translate_strings_p = save_translate_strings_p;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return attrs;
	}
      /* Parse the attribute list.  Require a comma between successive
	 (possibly empty) attributes.  */
      for (bool expect_comma = false; ; expect_comma = true)
	{
	  /* Parse a single attribute.  */
	  tree attr = c_parser_gnu_attribute (parser, attrs, expect_comma);
	  if (attr == error_mark_node)
	    {
	      /* Restore string translation before bailing out;
		 otherwise an erroneous attribute would leave
		 translation disabled for the rest of the translation
		 unit.  */
	      parser->translate_strings_p = save_translate_strings_p;
	      return attrs;
	    }
	  if (!attr)
	    break;
	  attrs = attr;
	}
      /* Look for the two `)' tokens.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->translate_strings_p = save_translate_strings_p;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      else
	{
	  parser->translate_strings_p = save_translate_strings_p;
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return attrs;
	}
      parser->translate_strings_p = save_translate_strings_p;
    }
  return attrs;
}
/* Parse an optional balanced token sequence.
balanced-token-sequence:
balanced-token
balanced-token-sequence balanced-token
balanced-token:
( balanced-token-sequence[opt] )
[ balanced-token-sequence[opt] ]
{ balanced-token-sequence[opt] }
any token other than ()[]{}
*/
static void
c_parser_balanced_token_sequence (c_parser *parser)
{
  /* Consume tokens until an unmatched closing delimiter (or EOF) is
     seen; nested (), [] and {} groups are skipped recursively.  The
     terminating delimiter itself is left for the caller.  */
  for (;;)
    {
      c_token *tok = c_parser_peek_token (parser);
      switch (tok->type)
	{
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	case CPP_EOF:
	  return;
	case CPP_OPEN_BRACE:
	  {
	    matching_braces braces;
	    braces.consume_open (parser);
	    c_parser_balanced_token_sequence (parser);
	    braces.require_close (parser);
	  }
	  break;
	case CPP_OPEN_PAREN:
	  {
	    matching_parens parens;
	    parens.consume_open (parser);
	    c_parser_balanced_token_sequence (parser);
	    parens.require_close (parser);
	  }
	  break;
	case CPP_OPEN_SQUARE:
	  /* No matching_* helper exists for square brackets; require
	     the closer by hand.  */
	  c_parser_consume_token (parser);
	  c_parser_balanced_token_sequence (parser);
	  c_parser_require (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
	  break;
	default:
	  c_parser_consume_token (parser);
	  break;
	}
    }
}
/* Parse standard (C2X) attributes (including GNU attributes in the
gnu:: namespace).
attribute-specifier-sequence:
attribute-specifier-sequence[opt] attribute-specifier
attribute-specifier:
[ [ attribute-list ] ]
attribute-list:
attribute[opt]
attribute-list, attribute[opt]
attribute:
attribute-token attribute-argument-clause[opt]
attribute-token:
standard-attribute
attribute-prefixed-token
standard-attribute:
identifier
attribute-prefixed-token:
attribute-prefix :: identifier
attribute-prefix:
identifier
attribute-argument-clause:
( balanced-token-sequence[opt] )
Keywords are accepted as identifiers for this purpose.
*/
static tree
c_parser_std_attribute (c_parser *parser, bool for_tm)
{
  c_token *token = c_parser_peek_token (parser);
  tree ns, name, attribute;
  /* Parse the attribute-token.  */
  if (token->type != CPP_NAME && token->type != CPP_KEYWORD)
    {
      c_parser_error (parser, "expected identifier");
      return error_mark_node;
    }
  name = canonicalize_attr_name (token->value);
  c_parser_consume_token (parser);
  /* An optional "prefix ::" selects an attribute namespace (e.g.
     gnu::); NS stays NULL_TREE for a plain standard attribute.  */
  if (c_parser_next_token_is (parser, CPP_SCOPE))
    {
      ns = name;
      c_parser_consume_token (parser);
      token = c_parser_peek_token (parser);
      if (token->type != CPP_NAME && token->type != CPP_KEYWORD)
	{
	  c_parser_error (parser, "expected identifier");
	  return error_mark_node;
	}
      name = canonicalize_attr_name (token->value);
      c_parser_consume_token (parser);
    }
  else
    ns = NULL_TREE;
  /* TREE_PURPOSE is the (namespace, name) pair; TREE_VALUE will hold
     the arguments, if any.  */
  attribute = build_tree_list (build_tree_list (ns, name), NULL_TREE);
  /* Parse the arguments, if any.  */
  const attribute_spec *as = lookup_attribute_spec (TREE_PURPOSE (attribute));
  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
    goto out;
  {
    location_t open_loc = c_parser_peek_token (parser)->location;
    matching_parens parens;
    parens.consume_open (parser);
    if ((as && as->max_length == 0)
	/* Special-case the transactional-memory attribute "outer",
	   which is specially handled but not registered as an
	   attribute, to avoid allowing arbitrary balanced token
	   sequences as arguments.  */
	|| is_attribute_p ("outer", name))
      {
	error_at (open_loc, "%qE attribute does not take any arguments", name);
	parens.skip_until_found_close (parser);
	return error_mark_node;
      }
    if (as)
      {
	/* A known attribute: parse its arguments properly.  */
	bool takes_identifier
	  = (ns != NULL_TREE
	     && strcmp (IDENTIFIER_POINTER (ns), "gnu") == 0
	     && attribute_takes_identifier_p (name));
	bool require_string
	  = (ns == NULL_TREE
	     && strcmp (IDENTIFIER_POINTER (name), "deprecated") == 0);
	TREE_VALUE (attribute)
	  = c_parser_attribute_arguments (parser, takes_identifier,
					  require_string, false);
      }
    else
      /* Unknown attribute: skip any balanced token sequence without
	 interpreting it.  */
      c_parser_balanced_token_sequence (parser);
    parens.require_close (parser);
  }
 out:
  if (ns == NULL_TREE && !for_tm && !as && !is_attribute_p ("nodiscard", name))
    {
      /* An attribute with standard syntax and no namespace specified
	 is a constraint violation if it is not one of the known
	 standard attributes (of which nodiscard is the only one
	 without a handler in GCC).  Diagnose it here with a pedwarn
	 and then discard it to prevent a duplicate warning later.  */
      pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored",
	       name);
      return error_mark_node;
    }
  return attribute;
}
static tree
c_parser_std_attribute_specifier (c_parser *parser, bool for_tm)
{
  /* Track standard attributes that may appear at most once per
     attribute-list, so duplicates can be diagnosed and dropped.  */
  bool seen_deprecated = false;
  bool seen_fallthrough = false;
  bool seen_maybe_unused = false;
  location_t loc = c_parser_peek_token (parser)->location;
  /* Require the opening "[[".  */
  if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>"))
    return NULL_TREE;
  if (!c_parser_require (parser, CPP_OPEN_SQUARE, "expected %<[%>"))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
      return NULL_TREE;
    }
  if (!for_tm)
    pedwarn_c11 (loc, OPT_Wpedantic,
		 "ISO C does not support %<[[]]%> attributes before C2X");
  tree attributes = NULL_TREE;
  while (true)
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_CLOSE_SQUARE)
	break;
      /* An attribute-list entry may be empty; just skip the comma.  */
      if (token->type == CPP_COMMA)
	{
	  c_parser_consume_token (parser);
	  continue;
	}
      tree attribute = c_parser_std_attribute (parser, for_tm);
      if (attribute != error_mark_node)
	{
	  bool duplicate = false;
	  tree name = get_attribute_name (attribute);
	  tree ns = get_attribute_namespace (attribute);
	  if (ns == NULL_TREE)
	    {
	      /* Some standard attributes may appear at most once in
		 each attribute list.  Diagnose duplicates and remove
		 them from the list to avoid subsequent diagnostics
		 such as the more general one for multiple
		 "fallthrough" attributes in the same place (including
		 in separate attribute lists in the same attribute
		 specifier sequence, which is not a constraint
		 violation).  */
	      if (is_attribute_p ("deprecated", name))
		{
		  if (seen_deprecated)
		    {
		      error ("attribute %<deprecated%> can appear at most "
			     "once in an attribute-list");
		      duplicate = true;
		    }
		  seen_deprecated = true;
		}
	      else if (is_attribute_p ("fallthrough", name))
		{
		  if (seen_fallthrough)
		    {
		      error ("attribute %<fallthrough%> can appear at most "
			     "once in an attribute-list");
		      duplicate = true;
		    }
		  seen_fallthrough = true;
		}
	      else if (is_attribute_p ("maybe_unused", name))
		{
		  if (seen_maybe_unused)
		    {
		      error ("attribute %<maybe_unused%> can appear at most "
			     "once in an attribute-list");
		      duplicate = true;
		    }
		  seen_maybe_unused = true;
		}
	    }
	  if (!duplicate)
	    {
	      /* Prepend; the chain is reversed before returning.  */
	      TREE_CHAIN (attribute) = attributes;
	      attributes = attribute;
	    }
	}
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
    }
  /* Consume (or resynchronize to) the closing "]]".  */
  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
  return nreverse (attributes);
}
/* Look past an optional balanced token sequence of raw look-ahead
tokens starting with the *Nth token. *N is updated to point to the
following token. Return true if such a sequence was found, false
if the tokens parsed were not balanced. */
/* Look past an optional balanced token sequence of raw look-ahead
   tokens starting with the *Nth token.  *N is updated to point to the
   following token.  Return true if such a sequence was found, false
   if the tokens parsed were not balanced.  The three bracket kinds
   share one code path: an opener records its required closer, recurses
   past the nested sequence, and then checks for that closer.  */
static bool
c_parser_check_balanced_raw_token_sequence (c_parser *parser, unsigned int *n)
{
  while (true)
    {
      c_token *token = c_parser_peek_nth_token_raw (parser, *n);
      enum cpp_ttype closer;
      switch (token->type)
	{
	case CPP_OPEN_BRACE:
	  closer = CPP_CLOSE_BRACE;
	  break;
	case CPP_OPEN_PAREN:
	  closer = CPP_CLOSE_PAREN;
	  break;
	case CPP_OPEN_SQUARE:
	  closer = CPP_CLOSE_SQUARE;
	  break;
	case CPP_CLOSE_BRACE:
	case CPP_CLOSE_PAREN:
	case CPP_CLOSE_SQUARE:
	case CPP_EOF:
	  /* Any closing delimiter (or EOF) ends this level; whether it
	     matches is the caller's concern.  */
	  return true;
	default:
	  /* Any other token is part of the sequence.  */
	  ++*n;
	  continue;
	}
      /* An opening delimiter: step past it, skip the nested sequence,
	 and require the matching closer.  */
      ++*n;
      if (!c_parser_check_balanced_raw_token_sequence (parser, n))
	return false;
      token = c_parser_peek_nth_token_raw (parser, *n);
      if (token->type != closer)
	return false;
      ++*n;
    }
}
/* Return whether standard attributes start with the Nth token. */
/* Return whether standard attributes start with the Nth token.  */
static bool
c_parser_nth_token_starts_std_attributes (c_parser *parser, unsigned int n)
{
  bool double_open
    = (c_parser_peek_nth_token (parser, n)->type == CPP_OPEN_SQUARE
       && c_parser_peek_nth_token (parser, n + 1)->type == CPP_OPEN_SQUARE);
  if (!double_open)
    return false;
  /* In C, '[[' must start attributes.  In Objective-C, we need to
     check whether '[[' is matched by ']]'.  */
  if (!c_dialect_objc ())
    return true;
  n += 2;
  if (!c_parser_check_balanced_raw_token_sequence (parser, &n))
    return false;
  if (c_parser_peek_nth_token_raw (parser, n)->type != CPP_CLOSE_SQUARE)
    return false;
  return c_parser_peek_nth_token_raw (parser, n + 1)->type == CPP_CLOSE_SQUARE;
}
/* Parse one or more consecutive standard attribute specifiers and
   return them chained together in source order.  */
static tree
c_parser_std_attribute_specifier_sequence (c_parser *parser)
{
  /* At least one specifier is present; parse it, then append any
     further ones.  */
  tree attributes = c_parser_std_attribute_specifier (parser, false);
  while (c_parser_nth_token_starts_std_attributes (parser, 1))
    attributes
      = chainon (attributes, c_parser_std_attribute_specifier (parser, false));
  return attributes;
}
/* Parse a type name (C90 6.5.5, C99 6.7.6, C11 6.7.7). ALIGNAS_OK
says whether alignment specifiers are OK (only in cases that might
be the type name of a compound literal).
type-name:
specifier-qualifier-list abstract-declarator[opt]
*/
struct c_type_name *
c_parser_type_name (c_parser *parser, bool alignas_ok)
{
  struct c_declspecs *specs = build_null_declspecs ();
  bool dummy = false;
  /* Parse the specifier-qualifier-list.  */
  c_parser_declspecs (parser, specs, false, true, true, alignas_ok, false,
		      false, true, cla_prefer_type);
  if (!specs->declspecs_seen_p)
    {
      c_parser_error (parser, "expected specifier-qualifier-list");
      return NULL;
    }
  /* Skip the finishing steps if the specifiers were erroneous.  */
  if (specs->type != error_mark_node)
    {
      pending_xref_error ();
      finish_declspecs (specs);
    }
  /* Parse the (possibly empty) abstract declarator.  */
  struct c_declarator *declarator
    = c_parser_declarator (parser, specs->typespec_kind != ctsk_none,
			   C_DTR_ABSTRACT, &dummy);
  if (declarator == NULL)
    return NULL;
  struct c_type_name *ret = XOBNEW (&parser_obstack, struct c_type_name);
  ret->specs = specs;
  ret->declarator = declarator;
  return ret;
}
/* Parse an initializer (C90 6.5.7, C99 6.7.8, C11 6.7.9).
initializer:
assignment-expression
{ initializer-list }
{ initializer-list , }
initializer-list:
designation[opt] initializer
initializer-list , designation[opt] initializer
designation:
designator-list =
designator-list:
designator
designator-list designator
designator:
array-designator
. identifier
array-designator:
[ constant-expression ]
GNU extensions:
initializer:
{ }
designation:
array-designator
identifier :
array-designator:
[ constant-expression ... constant-expression ]
Any expression without commas is accepted in the syntax for the
constant-expressions, with non-constant expressions rejected later.
This function is only used for top-level initializers; for nested
ones, see c_parser_initval. */
static struct c_expr
c_parser_initializer (c_parser *parser)
{
  /* Anything that is not a braced list is an assignment-expression.  */
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      struct c_expr ret = c_parser_expr_no_commas (parser, NULL);
      /* String literals and compound literals initialize objects
	 directly and must not be converted to rvalues here.  */
      if (TREE_CODE (ret.value) != STRING_CST
	  && TREE_CODE (ret.value) != COMPOUND_LITERAL_EXPR)
	ret = convert_lvalue_to_rvalue (loc, ret, true, true);
      return ret;
    }
  return c_parser_braced_init (parser, NULL_TREE, false, NULL);
}
/* The location of the last comma within the current initializer list,
or UNKNOWN_LOCATION if not within one. */
location_t last_init_list_comma;
/* Parse a braced initializer list. TYPE is the type specified for a
compound literal, and NULL_TREE for other initializers and for
nested braced lists. NESTED_P is true for nested braced lists,
false for the list of a compound literal or the list that is the
top-level initializer in a declaration. */
static struct c_expr
c_parser_braced_init (c_parser *parser, tree type, bool nested_p,
		      struct obstack *outer_obstack)
{
  struct c_expr ret;
  /* Obstack for this list's constructor elements; freed on every
     return path after pop_init_level.  */
  struct obstack braced_init_obstack;
  location_t brace_loc = c_parser_peek_token (parser)->location;
  gcc_obstack_init (&braced_init_obstack);
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  matching_braces braces;
  braces.consume_open (parser);
  if (nested_p)
    {
      /* Nested list: finish any implicit levels pending on the outer
	 obstack before pushing a new level.  */
      finish_implicit_inits (brace_loc, outer_obstack);
      push_init_level (brace_loc, 0, &braced_init_obstack);
    }
  else
    really_start_incremental_init (type);
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* "{ }" is a GNU extension.  */
      pedwarn (brace_loc, OPT_Wpedantic, "ISO C forbids empty initializer braces");
    }
  else
    {
      /* Parse a non-empty initializer list, possibly with a trailing
	 comma.  */
      while (true)
	{
	  c_parser_initelt (parser, &braced_init_obstack);
	  if (parser->error)
	    break;
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      /* Record the comma location for pop_init_level
		 diagnostics on the error path below.  */
	      last_init_list_comma = c_parser_peek_token (parser)->location;
	      c_parser_consume_token (parser);
	    }
	  else
	    break;
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    break;
	}
    }
  c_token *next_tok = c_parser_peek_token (parser);
  if (next_tok->type != CPP_CLOSE_BRACE)
    {
      /* Missing '}': return an error expression, but still pop the
	 init level and free the obstack.  */
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      braces.skip_until_found_close (parser);
      pop_init_level (brace_loc, 0, &braced_init_obstack, last_init_list_comma);
      obstack_free (&braced_init_obstack, NULL);
      return ret;
    }
  location_t close_loc = next_tok->location;
  c_parser_consume_token (parser);
  ret = pop_init_level (brace_loc, 0, &braced_init_obstack, close_loc);
  obstack_free (&braced_init_obstack, NULL);
  set_c_expr_source_range (&ret, brace_loc, close_loc);
  return ret;
}
/* Parse a nested initializer, including designators. */
static void
c_parser_initelt (c_parser *parser, struct obstack * braced_init_obstack)
{
  /* Parse any designator or designator list.  A single array
     designator may have the subsequent "=" omitted in GNU C, but a
     longer list or a structure member designator may not.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      /* Old-style structure member designator.  */
      set_init_label (c_parser_peek_token (parser)->location,
		      c_parser_peek_token (parser)->value,
		      c_parser_peek_token (parser)->location,
		      braced_init_obstack);
      /* Use the colon as the error location.  */
      pedwarn (c_parser_peek_2nd_token (parser)->location, OPT_Wpedantic,
	       "obsolete use of designated initializer with %<:%>");
      /* Consume the identifier and the colon.  */
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else
    {
      /* des_seen is 0 if there have been no designators, 1 if there
	 has been a single array designator and 2 otherwise.  */
      int des_seen = 0;
      /* Location of a designator.  */
      location_t des_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
	     || c_parser_next_token_is (parser, CPP_DOT))
	{
	  int des_prev = des_seen;
	  if (!des_seen)
	    des_loc = c_parser_peek_token (parser)->location;
	  if (des_seen < 2)
	    des_seen++;
	  if (c_parser_next_token_is (parser, CPP_DOT))
	    {
	      /* Member designator: '.' identifier.  */
	      des_seen = 2;
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_NAME))
		{
		  set_init_label (des_loc, c_parser_peek_token (parser)->value,
				  c_parser_peek_token (parser)->location,
				  braced_init_obstack);
		  c_parser_consume_token (parser);
		}
	      else
		{
		  /* Error recovery: record an erroneous element so
		     the initializer level stays balanced, skip to the
		     next comma, and give up on this element.  */
		  struct c_expr init;
		  init.set_error ();
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected identifier");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (input_location, init, false,
					braced_init_obstack);
		  return;
		}
	    }
	  else
	    {
	      tree first, second;
	      location_t ellipsis_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
	      location_t array_index_loc = UNKNOWN_LOCATION;
	      /* ??? Following the old parser, [ objc-receiver
		 objc-message-args ] is accepted as an initializer,
		 being distinguished from a designator by what follows
		 the first assignment expression inside the square
		 brackets, but after a first array designator a
		 subsequent square bracket is for Objective-C taken to
		 start an expression, using the obsolete form of
		 designated initializer without '=', rather than
		 possibly being a second level of designation: in LALR
		 terms, the '[' is shifted rather than reducing
		 designator to designator-list.  */
	      if (des_prev == 1 && c_dialect_objc ())
		{
		  des_seen = des_prev;
		  break;
		}
	      if (des_prev == 0 && c_dialect_objc ())
		{
		  /* This might be an array designator or an
		     Objective-C message expression.  If the former,
		     continue parsing here; if the latter, parse the
		     remainder of the initializer given the starting
		     primary-expression.  ??? It might make sense to
		     distinguish when des_prev == 1 as well; see
		     previous comment.  */
		  tree rec, args;
		  struct c_expr mexpr;
		  c_parser_consume_token (parser);
		  if (c_parser_peek_token (parser)->type == CPP_NAME
		      && ((c_parser_peek_token (parser)->id_kind
			   == C_ID_TYPENAME)
			  || (c_parser_peek_token (parser)->id_kind
			      == C_ID_CLASSNAME)))
		    {
		      /* Type name receiver.  */
		      tree id = c_parser_peek_token (parser)->value;
		      c_parser_consume_token (parser);
		      rec = objc_get_class_reference (id);
		      goto parse_message_args;
		    }
		  first = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (first);
		  /* An ellipsis or ']' right after the first
		     expression means this was an array designator
		     after all, not a message expression.  */
		  if (c_parser_next_token_is (parser, CPP_ELLIPSIS)
		      || c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    goto array_desig_after_first;
		  /* Expression receiver.  So far only one part
		     without commas has been parsed; there might be
		     more of the expression.  */
		  rec = first;
		  while (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      struct c_expr next;
		      location_t comma_loc, exp_loc;
		      comma_loc = c_parser_peek_token (parser)->location;
		      c_parser_consume_token (parser);
		      exp_loc = c_parser_peek_token (parser)->location;
		      next = c_parser_expr_no_commas (parser, NULL);
		      next = convert_lvalue_to_rvalue (exp_loc, next,
						       true, true);
		      rec = build_compound_expr (comma_loc, rec, next.value);
		    }
		parse_message_args:
		  /* Now parse the objc-message-args.  */
		  args = c_parser_objc_message_args (parser);
		  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					     "expected %<]%>");
		  mexpr.value
		    = objc_build_message_expr (rec, args);
		  mexpr.original_code = ERROR_MARK;
		  mexpr.original_type = NULL;
		  /* Now parse and process the remainder of the
		     initializer, starting with this message
		     expression as a primary-expression.  */
		  c_parser_initval (parser, &mexpr, braced_init_obstack);
		  return;
		}
	      /* Plain array designator: '[' expr (... expr)? ']'.  */
	      c_parser_consume_token (parser);
	      array_index_loc = c_parser_peek_token (parser)->location;
	      first = c_parser_expr_no_commas (parser, NULL).value;
	      mark_exp_read (first);
	    array_desig_after_first:
	      /* A GNU-extension range designator [first ... second]
		 may follow; otherwise SECOND stays NULL_TREE.  */
	      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
		{
		  ellipsis_loc = c_parser_peek_token (parser)->location;
		  c_parser_consume_token (parser);
		  second = c_parser_expr_no_commas (parser, NULL).value;
		  mark_exp_read (second);
		}
	      else
		second = NULL_TREE;
	      if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		{
		  c_parser_consume_token (parser);
		  set_init_index (array_index_loc, first, second,
				  braced_init_obstack);
		  if (second)
		    pedwarn (ellipsis_loc, OPT_Wpedantic,
			     "ISO C forbids specifying range of elements to initialize");
		}
	      else
		c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
					   "expected %<]%>");
	    }
	}
      if (des_seen >= 1)
	{
	  /* One or more designators were parsed; they should normally
	     be followed by '='.  */
	  if (c_parser_next_token_is (parser, CPP_EQ))
	    {
	      pedwarn_c90 (des_loc, OPT_Wpedantic,
			   "ISO C90 forbids specifying subobject "
			   "to initialize");
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* GNU extension: a single array designator may omit the
		 '='; any longer designator list may not.  */
	      if (des_seen == 1)
		pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
			 "obsolete use of designated initializer without %<=%>");
	      else
		{
		  /* Error recovery as above: emit an erroneous
		     element and skip to the next comma.  */
		  struct c_expr init;
		  init.set_error ();
		  init.original_code = ERROR_MARK;
		  init.original_type = NULL;
		  c_parser_error (parser, "expected %<=%>");
		  c_parser_skip_until_found (parser, CPP_COMMA, NULL);
		  process_init_element (input_location, init, false,
					braced_init_obstack);
		  return;
		}
	    }
	}
    }
  /* Parse the initializer value itself, after any designators.  */
  c_parser_initval (parser, NULL, braced_init_obstack);
}
/* Parse a nested initializer; as c_parser_initializer but parses
initializers within braced lists, after any designators have been
applied. If AFTER is not NULL then it is an Objective-C message
expression which is the primary-expression starting the
initializer. */
static void
c_parser_initval (c_parser *parser, struct c_expr *after,
		  struct obstack * braced_init_obstack)
{
  /* A starting primary-expression (AFTER) is only ever supplied when
     compiling Objective-C.  */
  gcc_assert (after == NULL || c_dialect_objc ());
  location_t loc = c_parser_peek_token (parser)->location;
  struct c_expr value;

  if (after == NULL && c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    /* A nested braced initializer list.  */
    value = c_parser_braced_init (parser, NULL_TREE, true,
				  braced_init_obstack);
  else
    {
      value = c_parser_expr_no_commas (parser, after);
      /* String literals and compound literals are kept as lvalues so
	 that later initializer processing can treat them specially.  */
      bool keep_as_is = (value.value == NULL_TREE
			 || TREE_CODE (value.value) == STRING_CST
			 || TREE_CODE (value.value) == COMPOUND_LITERAL_EXPR);
      if (!keep_as_is)
	value = convert_lvalue_to_rvalue (loc, value, true, true);
    }
  process_init_element (loc, value, false, braced_init_obstack);
}
/* Parse a compound statement (possibly a function body) (C90 6.6.2,
C99 6.8.2, C11 6.8.2).
compound-statement:
{ block-item-list[opt] }
{ label-declarations block-item-list }
block-item-list:
block-item
block-item-list block-item
block-item:
nested-declaration
statement
nested-declaration:
declaration
GNU extensions:
compound-statement:
{ label-declarations block-item-list }
nested-declaration:
__extension__ nested-declaration
nested-function-definition
label-declarations:
label-declaration
label-declarations label-declaration
label-declaration:
__label__ identifier-list ;
Allowing the mixing of declarations and code is new in C99. The
GNU syntax also permits (not shown above) labels at the end of
compound statements, which yield an error. We don't allow labels
on declarations; this might seem like a natural extension, but
there would be a conflict between gnu-attributes on the label and
prefix gnu-attributes on the declaration. ??? The syntax follows the
old parser in requiring something after label declarations.
Although they are erroneous if the labels declared aren't defined,
is it useful for the syntax to be this way?
OpenACC:
block-item:
openacc-directive
openacc-directive:
update-directive
OpenMP:
block-item:
openmp-directive
openmp-directive:
barrier-directive
flush-directive
taskwait-directive
taskyield-directive
cancel-directive
cancellation-point-directive */
static tree
c_parser_compound_statement (c_parser *parser, location_t *endlocp)
{
  location_t open_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Ensure a scope is entered and left anyway to avoid confusion
	 if we have just prepared to enter a function body.  */
      tree err_stmt = c_begin_compound_stmt (true);
      c_end_compound_stmt (open_loc, err_stmt, true);
      return error_mark_node;
    }
  tree stmt = c_begin_compound_stmt (true);
  /* Parse the body and the closing brace; report where the '}' was
     if the caller asked for it.  */
  location_t close_loc = c_parser_compound_statement_nostart (parser);
  if (endlocp != NULL)
    *endlocp = close_loc;
  return c_end_compound_stmt (open_loc, stmt, true);
}
/* Parse a compound statement except for the opening brace. This is
used for parsing both compound statements and statement expressions
(which follow different paths to handling the opening). */
static location_t
c_parser_compound_statement_nostart (c_parser *parser)
{
  /* Whether the previous block item was a statement; used to issue
     the C90 "mixed declarations and code" pedwarn.  */
  bool last_stmt = false;
  /* Whether the previous block item was a label; a label may not end
     a compound statement.  */
  bool last_label = false;
  bool save_valid_for_pragma = valid_location_for_stdc_pragma_p ();
  location_t label_loc = UNKNOWN_LOCATION;  /* Quiet warning.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      /* Empty compound statement: just consume the '}'.  */
      location_t endloc = c_parser_peek_token (parser)->location;
      add_debug_begin_stmt (endloc);
      c_parser_consume_token (parser);
      return endloc;
    }
  mark_valid_location_for_stdc_pragma (true);
  if (c_parser_next_token_is_keyword (parser, RID_LABEL))
    {
      /* Read zero or more forward-declarations for labels that nested
	 functions can jump to.  */
      mark_valid_location_for_stdc_pragma (false);
      while (c_parser_next_token_is_keyword (parser, RID_LABEL))
	{
	  label_loc = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	  /* Any identifiers, including those declared as type names,
	     are OK here.  */
	  while (true)
	    {
	      tree label;
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  break;
		}
	      label
		= declare_label (c_parser_peek_token (parser)->value);
	      C_DECLARED_LABEL_FLAG (label) = 1;
	      add_stmt (build_stmt (label_loc, DECL_EXPR, label));
	      c_parser_consume_token (parser);
	      if (c_parser_next_token_is (parser, CPP_COMMA))
		c_parser_consume_token (parser);
	      else
		break;
	    }
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      pedwarn (label_loc, OPT_Wpedantic, "ISO C forbids label declarations");
    }
  /* We must now have at least one statement, label or declaration.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
    {
      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
      c_parser_error (parser, "expected declaration or statement");
      location_t endloc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return endloc;
    }
  /* Main loop: one block item (label, declaration, pragma or
     statement) per iteration, until the closing brace.  */
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_BRACE))
    {
      location_t loc = c_parser_peek_token (parser)->location;
      loc = expansion_point_location_if_in_system_header (loc);
      /* Standard attributes may start a statement or a declaration.  */
      bool have_std_attrs
	= c_parser_nth_token_starts_std_attributes (parser, 1);
      tree std_attrs = NULL_TREE;
      if (have_std_attrs)
	std_attrs = c_parser_std_attribute_specifier_sequence (parser);
      if (c_parser_next_token_is_keyword (parser, RID_CASE)
	  || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	  || (c_parser_next_token_is (parser, CPP_NAME)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COLON))
	{
	  /* A label: 'case', 'default', or identifier ':'.  */
	  c_warn_unused_attributes (std_attrs);
	  if (c_parser_next_token_is_keyword (parser, RID_CASE))
	    label_loc = c_parser_peek_2nd_token (parser)->location;
	  else
	    label_loc = c_parser_peek_token (parser)->location;
	  last_label = true;
	  last_stmt = false;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_label (parser);
	}
      else if (!last_label
	       && (c_parser_next_tokens_start_declaration (parser)
		   || (have_std_attrs
		       && c_parser_next_token_is (parser, CPP_SEMICOLON))))
	{
	  /* A declaration, or an attribute-declaration (standard
	     attributes followed by ';').  */
	  last_label = false;
	  mark_valid_location_for_stdc_pragma (false);
	  bool fallthru_attr_p = false;
	  c_parser_declaration_or_fndef (parser, true, !have_std_attrs,
					 true, true, true, NULL,
					 vNULL, have_std_attrs, std_attrs,
					 NULL, &fallthru_attr_p);
	  if (last_stmt && !fallthru_attr_p)
	    pedwarn_c90 (loc, OPT_Wdeclaration_after_statement,
			 "ISO C90 forbids mixed declarations and code");
	  /* A [[fallthrough]] attribute-declaration counts as a
	     statement for the purposes of the pedwarn above.  */
	  last_stmt = fallthru_attr_p;
	}
      else if (!last_label
	       && c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  If standard attributes have already
	     been seen, it must start a statement, not a declaration,
	     but standard attributes starting a declaration may appear
	     after __extension__.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (!have_std_attrs
	      && (c_token_starts_declaration (c_parser_peek_2nd_token (parser))
		  || c_parser_nth_token_starts_std_attributes (parser, 2)))
	    {
	      int ext;
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      last_label = false;
	      mark_valid_location_for_stdc_pragma (false);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, NULL, vNULL);
	      /* Following the old parser, __extension__ does not
		 disable this diagnostic.  */
	      restore_extension_diagnostics (ext);
	      if (last_stmt)
		pedwarn_c90 (loc, OPT_Wdeclaration_after_statement,
			     "ISO C90 forbids mixed declarations and code");
	      last_stmt = false;
	    }
	  else
	    goto statement;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  if (have_std_attrs)
	    c_parser_error (parser, "expected declaration or statement");
	  /* External pragmas, and some omp pragmas, are not associated
	     with regular c code, and so are not to be considered statements
	     syntactically.  This ensures that the user doesn't put them
	     places that would turn into syntax errors if the directive
	     were ignored.  */
	  if (c_parser_pragma (parser,
			       last_label ? pragma_stmt : pragma_compound,
			       NULL))
	    last_label = false, last_stmt = true;
	}
      else if (c_parser_next_token_is (parser, CPP_EOF))
	{
	  /* Ran off the end of the input: diagnose and bail out.  */
	  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	  c_parser_error (parser, "expected declaration or statement");
	  return c_parser_peek_token (parser)->location;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_ELSE))
	{
	  if (parser->in_if_block)
	    {
	      /* Return so the enclosing if statement can consume the
		 'else'.  */
	      mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
	      error_at (loc, "expected %<}%> before %<else%>");
	      return c_parser_peek_token (parser)->location;
	    }
	  else
	    {
	      error_at (loc, "%<else%> without a previous %<if%>");
	      c_parser_consume_token (parser);
	      continue;
	    }
	}
      else
	{
	statement:
	  c_warn_unused_attributes (std_attrs);
	  last_label = false;
	  last_stmt = true;
	  mark_valid_location_for_stdc_pragma (false);
	  c_parser_statement_after_labels (parser, NULL);
	}

      parser->error = false;
    }
  if (last_label)
    error_at (label_loc, "label at end of compound statement");
  location_t endloc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Restore the value we started with.  */
  mark_valid_location_for_stdc_pragma (save_valid_for_pragma);
  return endloc;
}
/* Parse all consecutive labels, possibly preceded by standard
attributes. In this context, a statement is required, not a
declaration, so attributes must be followed by a statement that is
not just a semicolon. */
static void
c_parser_all_labels (c_parser *parser)
{
  /* Standard attributes before the first label must be followed by a
     real statement, not a bare ';'.  */
  if (c_parser_nth_token_starts_std_attributes (parser, 1))
    {
      tree attrs = c_parser_std_attribute_specifier_sequence (parser);
      if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
	c_warn_unused_attributes (attrs);
      else
	c_parser_error (parser, "expected statement");
    }
  /* Consume every label ('case', 'default', or identifier ':') that
     precedes the statement.  */
  while (true)
    {
      bool label_next
	= (c_parser_next_token_is_keyword (parser, RID_CASE)
	   || c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	   || (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_COLON));
      if (!label_next)
	break;
      c_parser_label (parser);
    }
}
/* Parse a label (C90 6.6.1, C99 6.8.1, C11 6.8.1).
label:
identifier : gnu-attributes[opt]
case constant-expression :
default :
GNU extensions:
label:
case constant-expression ... constant-expression :
The use of gnu-attributes on labels is a GNU extension. The syntax in
GNU C accepts any expressions without commas, non-constant
expressions being rejected later. Any standard
attribute-specifier-sequence before the first label has been parsed
in the caller, to distinguish statements from declarations. Any
attribute-specifier-sequence after the label is parsed in this
function. */
static void
c_parser_label (c_parser *parser)
{
  location_t loc1 = c_parser_peek_token (parser)->location;
  tree label = NULL_TREE;

  /* Remember whether this case or a user-defined label is allowed to fall
     through to.  */
  bool fallthrough_p = c_parser_peek_token (parser)->flags & PREV_FALLTHROUGH;
  if (c_parser_next_token_is_keyword (parser, RID_CASE))
    {
      tree exp1, exp2;
      c_parser_consume_token (parser);
      exp1 = c_parser_expr_no_commas (parser, NULL).value;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  c_parser_consume_token (parser);
	  label = do_case (loc1, exp1, NULL_TREE);
	}
      else if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* GNU extension: case LOW ... HIGH:  */
	  c_parser_consume_token (parser);
	  exp2 = c_parser_expr_no_commas (parser, NULL).value;
	  if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    label = do_case (loc1, exp1, exp2);
	}
      else
	c_parser_error (parser, "expected %<:%> or %<...%>");
    }
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    {
      c_parser_consume_token (parser);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	label = do_case (loc1, NULL_TREE, NULL_TREE);
    }
  else
    {
      /* Ordinary label: identifier ':' gnu-attributes[opt].  The
	 caller has already verified the first two tokens, hence the
	 asserts.  */
      tree name = c_parser_peek_token (parser)->value;
      tree tlab;
      tree attrs;
      location_t loc2 = c_parser_peek_token (parser)->location;
      gcc_assert (c_parser_next_token_is (parser, CPP_NAME));
      c_parser_consume_token (parser);
      gcc_assert (c_parser_next_token_is (parser, CPP_COLON));
      c_parser_consume_token (parser);
      attrs = c_parser_gnu_attributes (parser);
      tlab = define_label (loc2, name);
      if (tlab)
	{
	  decl_attributes (&tlab, attrs, 0);
	  label = add_stmt (build_stmt (loc1, LABEL_EXPR, tlab));
	}
    }
  if (label)
    {
      if (TREE_CODE (label) == LABEL_EXPR)
	FALLTHROUGH_LABEL_P (LABEL_EXPR_LABEL (label)) = fallthrough_p;
      else
	FALLTHROUGH_LABEL_P (CASE_LABEL (label)) = fallthrough_p;

      /* Standard attributes are only allowed here if they start a
	 statement, not a declaration (including the case of an
	 attribute-declaration with only attributes).  */
      bool have_std_attrs
	= c_parser_nth_token_starts_std_attributes (parser, 1);
      tree std_attrs = NULL_TREE;
      if (have_std_attrs)
	std_attrs = c_parser_std_attribute_specifier_sequence (parser);

      /* Allow '__attribute__((fallthrough));'.  */
      if (!have_std_attrs
	  && c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	{
	  location_t loc = c_parser_peek_token (parser)->location;
	  tree attrs = c_parser_gnu_attributes (parser);
	  if (attribute_fallthrough_p (attrs))
	    {
	      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
		{
		  /* Emit the internal fallthrough marker.  Note the
		     ';' is not consumed here; it is left to be parsed
		     as the following (null) statement.  */
		  tree fn = build_call_expr_internal_loc (loc,
							  IFN_FALLTHROUGH,
							  void_type_node, 0);
		  add_stmt (fn);
		}
	      else
		warning_at (loc, OPT_Wattributes, "%<fallthrough%> attribute "
			    "not followed by %<;%>");
	    }
	  else if (attrs != NULL_TREE)
	    warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>"
			" can be applied to a null statement");
	}
      if (c_parser_next_tokens_start_declaration (parser)
	  || (have_std_attrs
	      && c_parser_next_token_is (parser, CPP_SEMICOLON)))
	{
	  /* A label must precede a statement; diagnose a declaration
	     here but still parse it, for error recovery.  */
	  error_at (c_parser_peek_token (parser)->location,
		    "a label can only be part of a statement and "
		    "a declaration is not a statement");
	  c_parser_declaration_or_fndef (parser, /*fndef_ok*/ false,
					 /*static_assert_ok*/ true,
					 /*empty_ok*/ true, /*nested*/ true,
					 /*start_attr_ok*/ true, NULL,
					 vNULL, have_std_attrs, std_attrs);
	}
      else if (std_attrs)
	/* Nonempty attributes on the following statement are ignored.  */
	c_warn_unused_attributes (std_attrs);
    }
}
/* Parse a statement (C90 6.6, C99 6.8, C11 6.8).
statement:
labeled-statement
attribute-specifier-sequence[opt] compound-statement
expression-statement
attribute-specifier-sequence[opt] selection-statement
attribute-specifier-sequence[opt] iteration-statement
attribute-specifier-sequence[opt] jump-statement
labeled-statement:
attribute-specifier-sequence[opt] label statement
expression-statement:
expression[opt] ;
attribute-specifier-sequence expression ;
selection-statement:
if-statement
switch-statement
iteration-statement:
while-statement
do-statement
for-statement
jump-statement:
goto identifier ;
continue ;
break ;
return expression[opt] ;
GNU extensions:
statement:
attribute-specifier-sequence[opt] asm-statement
jump-statement:
goto * expression ;
expression-statement:
gnu-attributes ;
Objective-C:
statement:
attribute-specifier-sequence[opt] objc-throw-statement
attribute-specifier-sequence[opt] objc-try-catch-statement
attribute-specifier-sequence[opt] objc-synchronized-statement
objc-throw-statement:
@throw expression ;
@throw ;
OpenACC:
statement:
attribute-specifier-sequence[opt] openacc-construct
openacc-construct:
parallel-construct
kernels-construct
data-construct
loop-construct
parallel-construct:
parallel-directive structured-block
kernels-construct:
kernels-directive structured-block
data-construct:
data-directive structured-block
loop-construct:
loop-directive structured-block
OpenMP:
statement:
attribute-specifier-sequence[opt] openmp-construct
openmp-construct:
parallel-construct
for-construct
simd-construct
for-simd-construct
sections-construct
single-construct
parallel-for-construct
parallel-for-simd-construct
parallel-sections-construct
master-construct
critical-construct
atomic-construct
ordered-construct
parallel-construct:
parallel-directive structured-block
for-construct:
for-directive iteration-statement
simd-construct:
simd-directive iteration-statements
for-simd-construct:
for-simd-directive iteration-statements
sections-construct:
sections-directive section-scope
single-construct:
single-directive structured-block
parallel-for-construct:
parallel-for-directive iteration-statement
parallel-for-simd-construct:
parallel-for-simd-directive iteration-statement
parallel-sections-construct:
parallel-sections-directive section-scope
master-construct:
master-directive structured-block
critical-construct:
critical-directive structured-block
atomic-construct:
atomic-directive expression-statement
ordered-construct:
ordered-directive structured-block
Transactional Memory:
statement:
attribute-specifier-sequence[opt] transaction-statement
attribute-specifier-sequence[opt] transaction-cancel-statement
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_statement (c_parser *parser, bool *if_p, location_t *loc_after_labels)
{
  /* Consume any leading labels and standard attributes first.  */
  c_parser_all_labels (parser);
  /* Report where the statement proper begins, if requested.  */
  location_t after_labels_loc = c_parser_peek_token (parser)->location;
  if (loc_after_labels != NULL)
    *loc_after_labels = after_labels_loc;
  c_parser_statement_after_labels (parser, if_p, NULL);
}
/* Parse a statement, other than a labeled statement. CHAIN is a vector
of if-else-if conditions. All labels and standard attributes have
been parsed in the caller.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_statement_after_labels (c_parser *parser, bool *if_p,
				 vec<tree> *chain)
{
  location_t loc = c_parser_peek_token (parser)->location;
  /* STMT stays NULL_TREE for statements handled entirely by their own
     sub-parsers (compound statements, selection/iteration statements,
     pragmas) and for the null statement.  */
  tree stmt = NULL_TREE;
  /* Save and clear in_if_block: the statement parsed here opens a new
     context for dangling-else handling; restore it on exit.  */
  bool in_if_block = parser->in_if_block;
  parser->in_if_block = false;
  if (if_p != NULL)
    *if_p = false;

  /* Compound statements emit their own debug begin-statement marker.  */
  if (c_parser_peek_token (parser)->type != CPP_OPEN_BRACE)
    add_debug_begin_stmt (loc);

  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_OPEN_BRACE:
      add_stmt (c_parser_compound_statement (parser));
      break;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_IF:
	  c_parser_if_statement (parser, if_p, chain);
	  break;
	case RID_SWITCH:
	  c_parser_switch_statement (parser, if_p);
	  break;
	case RID_WHILE:
	  c_parser_while_statement (parser, false, 0, if_p);
	  break;
	case RID_DO:
	  c_parser_do_statement (parser, false, 0);
	  break;
	case RID_FOR:
	  c_parser_for_statement (parser, false, 0, if_p);
	  break;
	case RID_GOTO:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      stmt = c_finish_goto_label (loc,
					  c_parser_peek_token (parser)->value);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_MULT))
	    {
	      /* GNU extension: computed goto, 'goto *expr;'.  */
	      struct c_expr val;

	      c_parser_consume_token (parser);
	      val = c_parser_expression (parser);
	      val = convert_lvalue_to_rvalue (loc, val, false, true);
	      stmt = c_finish_goto_ptr (loc, val.value);
	    }
	  else
	    c_parser_error (parser, "expected identifier or %<*%>");
	  goto expect_semicolon;
	case RID_CONTINUE:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, objc_foreach_continue_label, false);
	  goto expect_semicolon;
	case RID_BREAK:
	  c_parser_consume_token (parser);
	  stmt = c_finish_bc_stmt (loc, objc_foreach_break_label, true);
	  goto expect_semicolon;
	case RID_RETURN:
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      stmt = c_finish_return (loc, NULL_TREE, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      location_t xloc = c_parser_peek_token (parser)->location;
	      struct c_expr expr = c_parser_expression_conv (parser);
	      mark_exp_read (expr.value);
	      stmt = c_finish_return (EXPR_LOC_OR_LOC (expr.value, xloc),
				      expr.value, expr.original_type);
	      goto expect_semicolon;
	    }
	  break;
	case RID_ASM:
	  stmt = c_parser_asm_statement (parser);
	  break;
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  stmt = c_parser_transaction (parser,
				       c_parser_peek_token (parser)->keyword);
	  break;
	case RID_TRANSACTION_CANCEL:
	  stmt = c_parser_transaction_cancel (parser);
	  goto expect_semicolon;
	case RID_AT_THROW:
	  gcc_assert (c_dialect_objc ());
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* '@throw;' with no operand rethrows.  */
	      stmt = objc_build_throw_stmt (loc, NULL_TREE);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      struct c_expr expr = c_parser_expression (parser);
	      expr = convert_lvalue_to_rvalue (loc, expr, false, false);
	      expr.value = c_fully_fold (expr.value, false, NULL);
	      stmt = objc_build_throw_stmt (loc, expr.value);
	      goto expect_semicolon;
	    }
	  break;
	case RID_AT_TRY:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_try_catch_finally_statement (parser);
	  break;
	case RID_AT_SYNCHRONIZED:
	  gcc_assert (c_dialect_objc ());
	  c_parser_objc_synchronized_statement (parser);
	  break;
	case RID_ATTRIBUTE:
	  {
	    /* Allow '__attribute__((fallthrough));'.  */
	    tree attrs = c_parser_gnu_attributes (parser);
	    if (attribute_fallthrough_p (attrs))
	      {
		if (c_parser_next_token_is (parser, CPP_SEMICOLON))
		  {
		    tree fn = build_call_expr_internal_loc (loc,
							    IFN_FALLTHROUGH,
							    void_type_node, 0);
		    add_stmt (fn);
		    /* Eat the ';'.  */
		    c_parser_consume_token (parser);
		  }
		else
		  warning_at (loc, OPT_Wattributes,
			      "%<fallthrough%> attribute not followed "
			      "by %<;%>");
	      }
	    else if (attrs != NULL_TREE)
	      warning_at (loc, OPT_Wattributes, "only attribute %<fallthrough%>"
			  " can be applied to a null statement");
	    break;
	  }
	default:
	  goto expr_stmt;
	}
      break;
    case CPP_SEMICOLON:
      /* Null statement.  */
      c_parser_consume_token (parser);
      break;
    case CPP_CLOSE_PAREN:
    case CPP_CLOSE_SQUARE:
      /* Avoid infinite loop in error recovery:
	 c_parser_skip_until_found stops at a closing nesting
	 delimiter without consuming it, but here we need to consume
	 it to proceed further.  */
      c_parser_error (parser, "expected statement");
      c_parser_consume_token (parser);
      break;
    case CPP_PRAGMA:
      c_parser_pragma (parser, pragma_stmt, if_p);
      break;
    default:
    expr_stmt:
      stmt = c_finish_expr_stmt (loc, c_parser_expression_conv (parser).value);
    expect_semicolon:
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      break;
    }
  /* Two cases cannot and do not have line numbers associated: If stmt
     is degenerate, such as "2;", then stmt is an INTEGER_CST, which
     cannot hold line numbers.  But that's OK because the statement
     will either be changed to a MODIFY_EXPR during gimplification of
     the statement expr, or discarded.  If stmt was compound, but
     without new variables, we will have skipped the creation of a
     BIND and will have a bare STATEMENT_LIST.  But that's OK because
     (recursively) all of the component statements should already have
     line numbers assigned.  ??? Can we discard no-op statements
     earlier?  */
  /* FIX: STMT may still be NULL_TREE here (null statement, compound
     statement, pragma, @try/@synchronized cases above), so it must be
     checked explicitly before querying its location rather than
     relying on the macro to tolerate a null argument.  */
  if (stmt && EXPR_LOCATION (stmt) == UNKNOWN_LOCATION)
    protected_set_expr_location (stmt, loc);

  parser->in_if_block = in_if_block;
}
/* Parse the condition from an if, do, while or for statements. */
static tree
c_parser_condition (c_parser *parser)
{
  location_t cond_loc = c_parser_peek_token (parser)->location;
  /* Parse the controlling expression, convert it to a truth value,
     and fold it.  */
  tree expr = c_parser_expression_conv (parser).value;
  expr = c_objc_common_truthvalue_conversion (cond_loc, expr);
  expr = c_fully_fold (expr, false, NULL);
  /* -Wsequence-point: diagnose unsequenced side effects in the
     condition.  */
  if (warn_sequence_point)
    verify_sequence_points (expr);
  return expr;
}
/* Parse a parenthesized condition from an if, do or while statement.
condition:
( expression )
*/
static tree
c_parser_paren_condition (c_parser *parser)
{
  matching_parens parens;
  /* Without an opening '(' there is nothing to parse.  */
  if (!parens.require_open (parser))
    return error_mark_node;
  tree result = c_parser_condition (parser);
  parens.skip_until_found_close (parser);
  return result;
}
/* Parse a statement which is a block in C99.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static tree
c_parser_c99_block_statement (c_parser *parser, bool *if_p,
			      location_t *loc_after_labels)
{
  /* In C99 and later the statement forms its own scope; in C90 the
     compound statement is a no-op wrapper.  */
  tree scope = c_begin_compound_stmt (flag_isoc99);
  location_t start_loc = c_parser_peek_token (parser)->location;
  c_parser_statement (parser, if_p, loc_after_labels);
  return c_end_compound_stmt (start_loc, scope, flag_isoc99);
}
/* Parse the body of an if statement. This is just parsing a
statement but (a) it is a block in C99, (b) we track whether the
body is an if statement for the sake of -Wparentheses warnings, (c)
we handle an empty body specially for the sake of -Wempty-body
warnings, and (d) we call parser_compound_statement directly
because c_parser_statement_after_labels resets
parser->in_if_block.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static tree
c_parser_if_body (c_parser *parser, bool *if_p,
		  const token_indent_info &if_tinfo)
{
  tree block = c_begin_compound_stmt (flag_isoc99);
  location_t body_loc = c_parser_peek_token (parser)->location;
  /* Set only for an unbraced, non-empty body; used for the
     multi-statement-macro warning below.  */
  location_t body_loc_after_labels = UNKNOWN_LOCATION;
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));

  c_parser_all_labels (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Empty body: record an empty statement, and warn unless an
	 'else' follows (in which case the bare ';' is presumably
	 deliberate).  */
      location_t loc = c_parser_peek_token (parser)->location;
      add_stmt (build_empty_stmt (loc));
      c_parser_consume_token (parser);
      if (!c_parser_next_token_is_keyword (parser, RID_ELSE))
	warning_at (loc, OPT_Wempty_body,
		    "suggest braces around empty body in an %<if%> statement");
    }
  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    add_stmt (c_parser_compound_statement (parser));
  else
    {
      body_loc_after_labels = c_parser_peek_token (parser)->location;
      c_parser_statement_after_labels (parser, if_p);
    }

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (if_tinfo, body_tinfo, next_tinfo);
  if (body_loc_after_labels != UNKNOWN_LOCATION
      && next_tinfo.type != CPP_SEMICOLON)
    warn_for_multistatement_macros (body_loc_after_labels, next_tinfo.location,
				    if_tinfo.location, RID_IF);

  return c_end_compound_stmt (body_loc, block, flag_isoc99);
}
/* Parse the else body of an if statement. This is just parsing a
statement but (a) it is a block in C99, (b) we handle an empty body
specially for the sake of -Wempty-body warnings. CHAIN is a vector
of if-else-if conditions. */
static tree
c_parser_else_body (c_parser *parser, const token_indent_info &else_tinfo,
		    vec<tree> *chain)
{
  location_t body_loc = c_parser_peek_token (parser)->location;
  tree block = c_begin_compound_stmt (flag_isoc99);
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  /* Set only for an unbraced body; used for the
     multi-statement-macro warning below.  */
  location_t body_loc_after_labels = UNKNOWN_LOCATION;

  c_parser_all_labels (parser);
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      /* Empty else body: warn and record an empty statement.  */
      location_t loc = c_parser_peek_token (parser)->location;
      warning_at (loc,
		  OPT_Wempty_body,
		  "suggest braces around empty body in an %<else%> statement");
      add_stmt (build_empty_stmt (loc));
      c_parser_consume_token (parser);
    }
  else
    {
      if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	body_loc_after_labels = c_parser_peek_token (parser)->location;
      c_parser_statement_after_labels (parser, NULL, chain);
    }

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (else_tinfo, body_tinfo, next_tinfo);
  if (body_loc_after_labels != UNKNOWN_LOCATION
      && next_tinfo.type != CPP_SEMICOLON)
    warn_for_multistatement_macros (body_loc_after_labels, next_tinfo.location,
				    else_tinfo.location, RID_ELSE);

  return c_end_compound_stmt (body_loc, block, flag_isoc99);
}
/* We might need to reclassify any previously-lexed identifier, e.g.
when we've left a for loop with an if-statement without else in the
body - we might have used a wrong scope for the token. See PR67784. */
/* Re-examine a previously lexed CPP_NAME token in the current scope.
   A token may have been classified while a different scope was in
   effect (e.g. after leaving a for loop whose body was an if statement
   without else - see PR67784), so recompute its id_kind from a fresh
   lookup_name, falling back to Objective-C class-name lookup when the
   name resolves to nothing.  */

static void
c_parser_maybe_reclassify_token (c_parser *parser)
{
  if (!c_parser_next_token_is (parser, CPP_NAME))
    return;

  c_token *token = c_parser_peek_token (parser);
  /* Class names were resolved at lex time and need no second look.  */
  if (token->id_kind == C_ID_CLASSNAME)
    return;

  tree decl = lookup_name (token->value);
  token->id_kind = C_ID_ID;
  if (decl)
    {
      /* The name is bound in the current scope; only a TYPE_DECL
	 changes the classification.  */
      if (TREE_CODE (decl) == TYPE_DECL)
	token->id_kind = C_ID_TYPENAME;
      return;
    }

  if (!c_dialect_objc ())
    return;

  tree objc_interface_decl = objc_is_class_name (token->value);
  /* Objective-C class names are in the same namespace as
     variables and typedefs, and hence are shadowed by local
     declarations.  */
  if (objc_interface_decl)
    {
      token->value = objc_interface_decl;
      token->id_kind = C_ID_CLASSNAME;
    }
}
/* Parse an if statement (C90 6.6.4, C99 6.8.4, C11 6.8.4).
if-statement:
if ( expression ) statement
if ( expression ) statement else statement
CHAIN is a vector of if-else-if conditions.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_if_statement (c_parser *parser, bool *if_p, vec<tree> *chain)
{
  tree block;
  location_t loc;
  tree cond;
  bool nested_if = false;
  tree first_body, second_body;
  bool in_if_block;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_IF));
  token_indent_info if_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Save and set in_if_block around the then-arm only; it tracks
     whether we are directly inside an if body for parsing purposes.  */
  in_if_block = parser->in_if_block;
  parser->in_if_block = true;
  first_body = c_parser_if_body (parser, &nested_if, if_tinfo);
  parser->in_if_block = in_if_block;

  /* -Wduplicated-cond: check COND against the conditions collected
     from the enclosing if-else-if chain, if any.  */
  if (warn_duplicated_cond)
    warn_duplicated_cond_add_or_warn (EXPR_LOCATION (cond), cond, &chain);

  if (c_parser_next_token_is_keyword (parser, RID_ELSE))
    {
      token_indent_info else_tinfo
	= get_token_indent_info (c_parser_peek_token (parser));
      c_parser_consume_token (parser);
      if (warn_duplicated_cond)
	{
	  if (c_parser_next_token_is_keyword (parser, RID_IF)
	      && chain == NULL)
	    {
	      /* We've got "if (COND) else if (COND2)".  Start the
		 condition chain and add COND as the first element.
		 The chain is heap-allocated and owned by the outermost
		 if statement of the chain.  */
	      chain = new vec<tree> ();
	      /* Constant or side-effecting conditions can never be
		 meaningfully duplicated, so don't record them.  */
	      if (!CONSTANT_CLASS_P (cond) && !TREE_SIDE_EFFECTS (cond))
		chain->safe_push (cond);
	    }
	  else if (!c_parser_next_token_is_keyword (parser, RID_IF))
	    {
	      /* This is if-else without subsequent if.  Zap the condition
		 chain; we would have already warned at this point.  */
	      delete chain;
	      chain = NULL;
	    }
	}
      second_body = c_parser_else_body (parser, else_tinfo, chain);
      /* Set IF_P to true to indicate that this if statement has an
	 else clause.  This may trigger the Wparentheses warning
	 below when we get back up to the parent if statement.  */
      if (if_p != NULL)
	*if_p = true;
    }
  else
    {
      second_body = NULL_TREE;
      /* Diagnose an ambiguous else if if-then-else is nested inside
	 if-then.  */
      if (nested_if)
	warning_at (loc, OPT_Wdangling_else,
		    "suggest explicit braces to avoid ambiguous %<else%>");
      if (warn_duplicated_cond)
	{
	  /* This if statement does not have an else clause.  We don't
	     need the condition chain anymore.  */
	  delete chain;
	  chain = NULL;
	}
    }
  c_finish_if_stmt (loc, cond, first_body, second_body);
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));

  c_parser_maybe_reclassify_token (parser);
}
/* Parse a switch statement (C90 6.6.4, C99 6.8.4, C11 6.8.4).
switch-statement:
switch (expression) statement
*/
static void
c_parser_switch_statement (c_parser *parser, bool *if_p)
{
  struct c_expr ce;
  tree block, expr, body;
  unsigned char save_in_statement;
  location_t switch_loc = c_parser_peek_token (parser)->location;
  location_t switch_cond_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SWITCH));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  bool explicit_cast_p = false;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      switch_cond_loc = c_parser_peek_token (parser)->location;
      /* Detect "switch ((TYPE) ...)": an open paren followed by a
	 typename means the controlling expression starts with an
	 explicit cast, which c_start_switch uses for diagnostics.  */
      if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
	  && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
	explicit_cast_p = true;
      ce = c_parser_expression (parser);
      ce = convert_lvalue_to_rvalue (switch_cond_loc, ce, true, true);
      expr = ce.value;
      /* ??? expr has no valid location?  */
      parens.skip_until_found_close (parser);
    }
  else
    {
      /* No "(": error recovery with an error_mark controlling
	 expression.  */
      switch_cond_loc = UNKNOWN_LOCATION;
      expr = error_mark_node;
      ce.original_type = error_mark_node;
    }
  c_start_switch (switch_loc, switch_cond_loc, expr, explicit_cast_p);
  save_in_statement = in_statement;
  /* OR rather than assign: keep any enclosing iteration bit so
     "continue" inside the switch still binds to the loop.  */
  in_statement |= IN_SWITCH_STMT;
  location_t loc_after_labels;
  bool open_brace_p = c_parser_peek_token (parser)->type == CPP_OPEN_BRACE;
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);
  location_t next_loc = c_parser_peek_token (parser)->location;
  if (!open_brace_p && c_parser_peek_token (parser)->type != CPP_SEMICOLON)
    warn_for_multistatement_macros (loc_after_labels, next_loc, switch_loc,
				    RID_SWITCH);
  c_finish_switch (body, ce.original_type);
  in_statement = save_in_statement;
  add_stmt (c_end_compound_stmt (switch_loc, block, flag_isoc99));
  c_parser_maybe_reclassify_token (parser);
}
/* Parse a while statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
while-statement:
while (expression) statement
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_while_statement (c_parser *parser, bool ivdep, unsigned short unroll,
			  bool *if_p)
{
  tree block, cond, body;
  unsigned char save_in_statement;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_WHILE));
  token_indent_info while_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  cond = c_parser_paren_condition (parser);
  /* Wrap the condition in ANNOTATE_EXPRs carrying the "#pragma GCC
     ivdep" / "#pragma GCC unroll" hints for the middle end.  */
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  save_in_statement = in_statement;
  in_statement = IN_ITERATION_STMT;
  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  location_t loc_after_labels;
  bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE);
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);
  add_stmt (build_stmt (loc, WHILE_STMT, cond, body));
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
  c_parser_maybe_reclassify_token (parser);

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (while_tinfo, body_tinfo, next_tinfo);
  /* Unbraced, non-empty body: check for a macro expanding to multiple
     statements after the loop.  */
  if (next_tinfo.type != CPP_SEMICOLON && !open_brace)
    warn_for_multistatement_macros (loc_after_labels, next_tinfo.location,
				    while_tinfo.location, RID_WHILE);

  in_statement = save_in_statement;
}
/* Parse a do statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
do-statement:
do statement while ( expression ) ;
*/
static void
c_parser_do_statement (c_parser *parser, bool ivdep, unsigned short unroll)
{
  tree block, cond, body;
  unsigned char save_in_statement;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_DO));
  c_parser_consume_token (parser);
  /* "do ;" - warn before parsing the body consumes the semicolon.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    warning_at (c_parser_peek_token (parser)->location,
		OPT_Wempty_body,
		"suggest braces around empty body in %<do%> statement");
  block = c_begin_compound_stmt (flag_isoc99);
  loc = c_parser_peek_token (parser)->location;
  save_in_statement = in_statement;
  in_statement = IN_ITERATION_STMT;
  body = c_parser_c99_block_statement (parser, NULL);
  c_parser_require_keyword (parser, RID_WHILE, "expected %<while%>");
  /* Restore before the condition: break/continue in the controlling
     expression (e.g. in a statement expression) do not bind here.  */
  in_statement = save_in_statement;
  cond = c_parser_paren_condition (parser);
  /* Attach "#pragma GCC ivdep" / "#pragma GCC unroll" hints as
     ANNOTATE_EXPRs on the condition.  */
  if (ivdep && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_ivdep_kind),
		   integer_zero_node);
  if (unroll && cond != error_mark_node)
    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
		   build_int_cst (integer_type_node,
				  annot_expr_unroll_kind),
		   build_int_cst (integer_type_node, unroll));
  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);
  add_stmt (build_stmt (loc, DO_STMT, cond, body));
  add_stmt (c_end_compound_stmt (loc, block, flag_isoc99));
}
/* Parse a for statement (C90 6.6.5, C99 6.8.5, C11 6.8.5).
for-statement:
for ( expression[opt] ; expression[opt] ; expression[opt] ) statement
for ( nested-declaration expression[opt] ; expression[opt] ) statement
The form with a declaration is new in C99.
??? In accordance with the old parser, the declaration may be a
nested function, which is then rejected in check_for_loop_decls,
but does it make any sense for this to be included in the grammar?
Note in particular that the nested function does not include a
trailing ';', whereas the "declaration" production includes one.
Also, can we reject bad declarations earlier and cheaper than
check_for_loop_decls?
In Objective-C, there are two additional variants:
foreach-statement:
for ( expression in expression ) statement
for ( declaration in expression ) statement
This is inconsistent with C, because the second variant is allowed
even if c99 is not enabled.
The rest of the comment documents these Objective-C foreach-statement.
Here is the canonical example of the first variant:
for (object in array) { do something with object }
we call the first expression ("object") the "object_expression" and
the second expression ("array") the "collection_expression".
object_expression must be an lvalue of type "id" (a generic Objective-C
object) because the loop works by assigning to object_expression the
various objects from the collection_expression. collection_expression
must evaluate to something of type "id" which responds to the method
countByEnumeratingWithState:objects:count:.
The canonical example of the second variant is:
for (id object in array) { do something with object }
which is completely equivalent to
{
id object;
for (object in array) { do something with object }
}
Note that initializing 'object' in some way (eg, "for ((object =
xxx) in array) { do something with object }") is possibly
technically valid, but completely pointless as 'object' will be
assigned to something else as soon as the loop starts. We should
most likely reject it (TODO).
The beginning of the Objective-C foreach-statement looks exactly
like the beginning of the for-statement, and we can tell it is a
foreach-statement only because the initial declaration or
expression is terminated by 'in' instead of ';'.
IF_P is used to track whether there's a (possibly labeled) if statement
which is not enclosed in braces and has an else clause. This is used to
implement -Wparentheses. */
static void
c_parser_for_statement (c_parser *parser, bool ivdep, unsigned short unroll,
			bool *if_p)
{
  tree block, cond, incr, body;
  unsigned char save_in_statement;
  tree save_objc_foreach_break_label, save_objc_foreach_continue_label;
  /* The following are only used when parsing an ObjC foreach statement.  */
  tree object_expression;
  /* Silence the bogus uninitialized warning.  */
  tree collection_expression = NULL;
  location_t loc = c_parser_peek_token (parser)->location;
  location_t for_loc = loc;
  bool is_foreach_statement = false;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_FOR));
  token_indent_info for_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  c_parser_consume_token (parser);
  /* Open a compound statement in Objective-C as well, just in case this is
     a foreach expression.  */
  block = c_begin_compound_stmt (flag_isoc99 || c_dialect_objc ());
  cond = error_mark_node;
  incr = error_mark_node;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      /* Parse the initialization declaration or expression.  */
      object_expression = error_mark_node;
      /* While true, an "in" keyword after the init clause means this is
	 an ObjC fast-enumeration loop rather than a plain for.  */
      parser->objc_could_be_foreach_context = c_dialect_objc ();
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  /* Empty init clause: "for (;".  */
	  parser->objc_could_be_foreach_context = false;
	  c_parser_consume_token (parser);
	  c_finish_expr_stmt (loc, NULL_TREE);
	}
      else if (c_parser_next_tokens_start_declaration (parser)
	       || c_parser_nth_token_starts_std_attributes (parser, 1))
	{
	  /* C99-style declaration in the init clause (or ObjC
	     "for (id x in ...)").  */
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 &object_expression, vNULL);
	  parser->objc_could_be_foreach_context = false;
	  if (c_parser_next_token_is_keyword (parser, RID_IN))
	    {
	      c_parser_consume_token (parser);
	      is_foreach_statement = true;
	      /* Fast enumeration permits exactly one iterating
		 variable.  */
	      if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		c_parser_error (parser, "multiple iterating variables in "
					"fast enumeration");
	    }
	  else
	    check_for_loop_decls (for_loc, flag_isoc99);
	}
      else if (c_parser_next_token_is_keyword (parser, RID_EXTENSION))
	{
	  /* __extension__ can start a declaration, but is also an
	     unary operator that can start an expression.  Consume all
	     but the last of a possible series of __extension__ to
	     determine which.  */
	  while (c_parser_peek_2nd_token (parser)->type == CPP_KEYWORD
		 && (c_parser_peek_2nd_token (parser)->keyword
		     == RID_EXTENSION))
	    c_parser_consume_token (parser);
	  if (c_token_starts_declaration (c_parser_peek_2nd_token (parser))
	      || c_parser_nth_token_starts_std_attributes (parser, 2))
	    {
	      int ext;
	      /* Suppress pedantic diagnostics for the __extension__'d
		 declaration, restoring them afterwards.  */
	      ext = disable_extension_diagnostics ();
	      c_parser_consume_token (parser);
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, &object_expression, vNULL);
	      parser->objc_could_be_foreach_context = false;
	      restore_extension_diagnostics (ext);
	      if (c_parser_next_token_is_keyword (parser, RID_IN))
		{
		  c_parser_consume_token (parser);
		  is_foreach_statement = true;
		  if (check_for_loop_decls (for_loc, true) == NULL_TREE)
		    c_parser_error (parser, "multiple iterating variables in "
					    "fast enumeration");
		}
	      else
		check_for_loop_decls (for_loc, flag_isoc99);
	    }
	  else
	    /* The last __extension__ starts an expression after all.  */
	    goto init_expr;
	}
      else
	{
	init_expr:
	  {
	    struct c_expr ce;
	    tree init_expression;
	    ce = c_parser_expression (parser);
	    init_expression = ce.value;
	    parser->objc_could_be_foreach_context = false;
	    if (c_parser_next_token_is_keyword (parser, RID_IN))
	      {
		/* "for (EXPR in ...)": EXPR must be an assignable
		   iterating variable.  */
		c_parser_consume_token (parser);
		is_foreach_statement = true;
		if (! lvalue_p (init_expression))
		  c_parser_error (parser, "invalid iterating variable in "
					  "fast enumeration");
		object_expression
		  = c_fully_fold (init_expression, false, NULL);
	      }
	    else
	      {
		/* Ordinary init expression followed by ";".  */
		ce = convert_lvalue_to_rvalue (loc, ce, true, false);
		init_expression = ce.value;
		c_finish_expr_stmt (loc, init_expression);
		c_parser_skip_until_found (parser, CPP_SEMICOLON,
					   "expected %<;%>");
	      }
	  }
	}
      /* Parse the loop condition.  In the case of a foreach
	 statement, there is no loop condition.  */
      gcc_assert (!parser->objc_could_be_foreach_context);
      if (!is_foreach_statement)
	{
	  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	    {
	      /* Empty condition.  The loop pragmas require a condition
		 to attach their annotation to, so diagnose them here.  */
	      if (ivdep)
		{
		  c_parser_error (parser, "missing loop condition in loop "
					  "with %<GCC ivdep%> pragma");
		  cond = error_mark_node;
		}
	      else if (unroll)
		{
		  c_parser_error (parser, "missing loop condition in loop "
					  "with %<GCC unroll%> pragma");
		  cond = error_mark_node;
		}
	      else
		{
		  c_parser_consume_token (parser);
		  cond = NULL_TREE;
		}
	    }
	  else
	    {
	      cond = c_parser_condition (parser);
	      c_parser_skip_until_found (parser, CPP_SEMICOLON,
					 "expected %<;%>");
	    }
	  /* Attach "#pragma GCC ivdep" / "#pragma GCC unroll" hints as
	     ANNOTATE_EXPRs on the condition.  */
	  if (ivdep && cond != error_mark_node)
	    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
			   build_int_cst (integer_type_node,
					  annot_expr_ivdep_kind),
			   integer_zero_node);
	  if (unroll && cond != error_mark_node)
	    cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond,
			   build_int_cst (integer_type_node,
					  annot_expr_unroll_kind),
			   build_int_cst (integer_type_node, unroll));
	}
      /* Parse the increment expression (the third expression in a
	 for-statement).  In the case of a foreach-statement, this is
	 the expression that follows the 'in'.  */
      loc = c_parser_peek_token (parser)->location;
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	{
	  if (is_foreach_statement)
	    {
	      c_parser_error (parser,
			      "missing collection in fast enumeration");
	      collection_expression = error_mark_node;
	    }
	  else
	    incr = c_process_expr_stmt (loc, NULL_TREE);
	}
      else
	{
	  if (is_foreach_statement)
	    collection_expression
	      = c_fully_fold (c_parser_expression (parser).value, false, NULL);
	  else
	    {
	      struct c_expr ce = c_parser_expression (parser);
	      ce = convert_lvalue_to_rvalue (loc, ce, true, false);
	      incr = c_process_expr_stmt (loc, ce.value);
	    }
	}
      parens.skip_until_found_close (parser);
    }
  save_in_statement = in_statement;
  if (is_foreach_statement)
    {
      /* Foreach loops use dedicated break/continue labels; save any
	 outer ones so nested foreach statements work.  */
      in_statement = IN_OBJC_FOREACH;
      save_objc_foreach_break_label = objc_foreach_break_label;
      save_objc_foreach_continue_label = objc_foreach_continue_label;
      objc_foreach_break_label = create_artificial_label (loc);
      objc_foreach_continue_label = create_artificial_label (loc);
    }
  else
    in_statement = IN_ITERATION_STMT;

  token_indent_info body_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));

  location_t loc_after_labels;
  bool open_brace = c_parser_next_token_is (parser, CPP_OPEN_BRACE);
  body = c_parser_c99_block_statement (parser, if_p, &loc_after_labels);

  if (is_foreach_statement)
    objc_finish_foreach_loop (for_loc, object_expression,
			      collection_expression, body,
			      objc_foreach_break_label,
			      objc_foreach_continue_label);
  else
    add_stmt (build_stmt (for_loc, FOR_STMT, NULL_TREE, cond, incr,
			  body, NULL_TREE));
  add_stmt (c_end_compound_stmt (for_loc, block,
				 flag_isoc99 || c_dialect_objc ()));
  c_parser_maybe_reclassify_token (parser);

  token_indent_info next_tinfo
    = get_token_indent_info (c_parser_peek_token (parser));
  warn_for_misleading_indentation (for_tinfo, body_tinfo, next_tinfo);
  if (next_tinfo.type != CPP_SEMICOLON && !open_brace)
    warn_for_multistatement_macros (loc_after_labels, next_tinfo.location,
				    for_tinfo.location, RID_FOR);

  in_statement = save_in_statement;
  if (is_foreach_statement)
    {
      objc_foreach_break_label = save_objc_foreach_break_label;
      objc_foreach_continue_label = save_objc_foreach_continue_label;
    }
}
/* Parse an asm statement, a GNU extension. This is a full-blown asm
statement with inputs, outputs, clobbers, and volatile, inline, and goto
tags allowed.
asm-qualifier:
volatile
inline
goto
asm-qualifier-list:
asm-qualifier-list asm-qualifier
asm-qualifier
asm-statement:
asm asm-qualifier-list[opt] ( asm-argument ) ;
asm-argument:
asm-string-literal
asm-string-literal : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt]
asm-string-literal : asm-operands[opt] : asm-operands[opt] \
: asm-clobbers[opt]
asm-string-literal : : asm-operands[opt] : asm-clobbers[opt] \
: asm-goto-operands
The form with asm-goto-operands is valid if and only if the
asm-qualifier-list contains goto, and is the only allowed form in that case.
Duplicate asm-qualifiers are not allowed.
The :: token is considered equivalent to two consecutive : tokens. */
static tree
c_parser_asm_statement (c_parser *parser)
{
  tree str, outputs, inputs, clobbers, labels, ret;
  bool simple;
  location_t asm_loc = c_parser_peek_token (parser)->location;
  int section, nsections;

  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ASM));
  c_parser_consume_token (parser);

  /* Handle the asm-qualifier-list.  Each qualifier's first location is
     recorded both to detect duplicates and to note where it was first
     seen.  */
  location_t volatile_loc = UNKNOWN_LOCATION;
  location_t inline_loc = UNKNOWN_LOCATION;
  location_t goto_loc = UNKNOWN_LOCATION;
  for (;;)
    {
      c_token *token = c_parser_peek_token (parser);
      location_t loc = token->location;
      switch (token->keyword)
	{
	case RID_VOLATILE:
	  if (volatile_loc)
	    {
	      error_at (loc, "duplicate %<asm%> qualifier %qE", token->value);
	      inform (volatile_loc, "first seen here");
	    }
	  else
	    volatile_loc = loc;
	  c_parser_consume_token (parser);
	  continue;

	case RID_INLINE:
	  if (inline_loc)
	    {
	      error_at (loc, "duplicate %<asm%> qualifier %qE", token->value);
	      inform (inline_loc, "first seen here");
	    }
	  else
	    inline_loc = loc;
	  c_parser_consume_token (parser);
	  continue;

	case RID_GOTO:
	  if (goto_loc)
	    {
	      error_at (loc, "duplicate %<asm%> qualifier %qE", token->value);
	      inform (goto_loc, "first seen here");
	    }
	  else
	    goto_loc = loc;
	  c_parser_consume_token (parser);
	  continue;

	case RID_CONST:
	case RID_RESTRICT:
	  /* const/restrict were historically accepted here; reject but
	     keep parsing qualifiers.  */
	  error_at (loc, "%qE is not a valid %<asm%> qualifier", token->value);
	  c_parser_consume_token (parser);
	  continue;

	default:
	  break;
	}
      break;
    }

  bool is_volatile = (volatile_loc != UNKNOWN_LOCATION);
  bool is_inline = (inline_loc != UNKNOWN_LOCATION);
  bool is_goto = (goto_loc != UNKNOWN_LOCATION);

  /* RET is NULL on any error path below.  */
  ret = NULL;

  matching_parens parens;
  if (!parens.require_open (parser))
    goto error;

  str = c_parser_asm_string_literal (parser);
  if (str == NULL_TREE)
    goto error_close_paren;

  simple = true;
  outputs = NULL_TREE;
  inputs = NULL_TREE;
  clobbers = NULL_TREE;
  labels = NULL_TREE;

  /* asm goto requires a goto-operands section, so it cannot stop here.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
    goto done_asm;

  /* Parse each colon-delimited section of operands.  */
  nsections = 3 + is_goto;
  for (section = 0; section < nsections; ++section)
    {
      /* "::" counts as two consecutive colons, skipping a section.  */
      if (c_parser_next_token_is (parser, CPP_SCOPE))
	{
	  ++section;
	  if (section == nsections)
	    {
	      c_parser_error (parser, "expected %<)%>");
	      goto error_close_paren;
	    }
	  c_parser_consume_token (parser);
	}
      else if (!c_parser_require (parser, CPP_COLON,
				  is_goto
				  ? G_("expected %<:%>")
				  : G_("expected %<:%> or %<)%>"),
				  UNKNOWN_LOCATION, is_goto))
	goto error_close_paren;

      /* Once past any colon, we're no longer a simple asm.  */
      simple = false;

      /* Section 3 (goto labels) must be parsed even when apparently
	 empty, so the labels parser can diagnose it.  */
      if ((!c_parser_next_token_is (parser, CPP_COLON)
	   && !c_parser_next_token_is (parser, CPP_SCOPE)
	   && !c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	  || section == 3)
	switch (section)
	  {
	  case 0:
	    /* For asm goto, we don't allow output operands, but reserve
	       the slot for a future extension that does allow them.  */
	    if (!is_goto)
	      outputs = c_parser_asm_operands (parser);
	    break;
	  case 1:
	    inputs = c_parser_asm_operands (parser);
	    break;
	  case 2:
	    clobbers = c_parser_asm_clobbers (parser);
	    break;
	  case 3:
	    labels = c_parser_asm_goto_operands (parser);
	    break;
	  default:
	    gcc_unreachable ();
	  }

      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN) && !is_goto)
	goto done_asm;
    }

 done_asm:
  if (!parens.require_close (parser))
    {
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      goto error;
    }

  if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
    c_parser_skip_to_end_of_block_or_statement (parser);

  ret = build_asm_stmt (is_volatile,
			build_asm_expr (asm_loc, str, outputs, inputs,
					clobbers, labels, simple, is_inline));

 error:
  return ret;

 error_close_paren:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
  goto error;
}
/* Parse asm operands, a GNU extension.
asm-operands:
asm-operand
asm-operands , asm-operand
asm-operand:
asm-string-literal ( expression )
[ identifier ] asm-string-literal ( expression )
*/
static tree
c_parser_asm_operands (c_parser *parser)
{
  /* Result is a chain of tree_lists: purpose = (name, constraint
     string), value = the operand expression.  */
  tree list = NULL_TREE;
  while (true)
    {
      tree name, str;
      struct c_expr expr;
      if (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
	{
	  /* Optional "[symbolic-name]" before the constraint string.  */
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      tree id = c_parser_peek_token (parser)->value;
	      c_parser_consume_token (parser);
	      /* Store the name as a STRING_CST, as build_asm_expr
		 expects, rather than as an identifier.  */
	      name = build_string (IDENTIFIER_LENGTH (id),
				   IDENTIFIER_POINTER (id));
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, NULL);
	      return NULL_TREE;
	    }
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	}
      else
	name = NULL_TREE;
      str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      matching_parens parens;
      if (!parens.require_open (parser))
	return NULL_TREE;
      expr = c_parser_expression (parser);
      mark_exp_read (expr.value);
      if (!parens.require_close (parser))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  return NULL_TREE;
	}
      list = chainon (list, build_tree_list (build_tree_list (name, str),
					     expr.value));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  return list;
}
/* Parse asm clobbers, a GNU extension.
asm-clobbers:
asm-string-literal
asm-clobbers , asm-string-literal
*/
/* Parse a comma-separated list of asm clobber strings into a
   TREE_LIST (in reverse source order, matching historical behavior).
   Returns NULL_TREE if any element fails to parse as a string
   literal.  */

static tree
c_parser_asm_clobbers (c_parser *parser)
{
  tree clobbers = NULL_TREE;
  for (;;)
    {
      tree str = c_parser_asm_string_literal (parser);
      if (str == NULL_TREE)
	return NULL_TREE;
      clobbers = tree_cons (NULL_TREE, str, clobbers);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  return clobbers;
}
/* Parse asm goto labels, a GNU extension.
asm-goto-operands:
identifier
asm-goto-operands , identifier
*/
/* Parse the comma-separated label identifiers of an asm goto.  Each
   entry of the returned TREE_LIST has the label name as a STRING_CST
   purpose and the looked-up LABEL_DECL as its value, in source order.
   Returns NULL_TREE on a parse error.  */

static tree
c_parser_asm_goto_operands (c_parser *parser)
{
  tree operands = NULL_TREE;
  for (;;)
    {
      if (!c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return NULL_TREE;
	}
      c_token *tok = c_parser_peek_token (parser);
      tree id = tok->value;
      tree label = lookup_label_for_goto (tok->location, id);
      c_parser_consume_token (parser);
      /* The asm references the label, even if nothing else does.  */
      TREE_USED (label) = 1;
      tree name = build_string (IDENTIFIER_LENGTH (id),
				IDENTIFIER_POINTER (id));
      operands = tree_cons (name, label, operands);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	return nreverse (operands);
      c_parser_consume_token (parser);
    }
}
/* Parse a possibly concatenated sequence of string literals.
TRANSLATE says whether to translate them to the execution character
set; WIDE_OK says whether any kind of prefixed string literal is
permitted in this context. This code is based on that in
lex_string. */
struct c_expr
c_parser_string_literal (c_parser *parser, bool translate, bool wide_ok)
{
  struct c_expr ret;
  size_t count;
  struct obstack str_ob;
  struct obstack loc_ob;
  cpp_string str, istr, *strs;
  c_token *tok;
  location_t loc, last_tok_loc;
  enum cpp_ttype type;
  tree value, string_tree;

  tok = c_parser_peek_token (parser);
  loc = tok->location;
  /* Resolve through macro expansion so concatenation records refer to
     where each literal was written.  */
  last_tok_loc = linemap_resolve_location (line_table, loc,
					   LRK_MACRO_DEFINITION_LOCATION,
					   NULL);
  type = tok->type;
  switch (type)
    {
    case CPP_STRING:
    case CPP_WSTRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_UTF8STRING:
      string_tree = tok->value;
      break;

    default:
      c_parser_error (parser, "expected string literal");
      ret.set_error ();
      ret.value = NULL_TREE;
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL_TREE;
      return ret;
    }

  /* Try to avoid the overhead of creating and destroying an obstack
     for the common case of just one string.  */
  switch (c_parser_peek_2nd_token (parser)->type)
    {
    default:
      /* Single literal: point STRS at the stack-local cpp_string.  */
      c_parser_consume_token (parser);
      str.text = (const unsigned char *) TREE_STRING_POINTER (string_tree);
      str.len = TREE_STRING_LENGTH (string_tree);
      count = 1;
      strs = &str;
      break;

    case CPP_STRING:
    case CPP_WSTRING:
    case CPP_STRING16:
    case CPP_STRING32:
    case CPP_UTF8STRING:
      /* Adjacent literals: collect each piece (and its location) on
	 obstacks, which live until the end of the function.  */
      gcc_obstack_init (&str_ob);
      gcc_obstack_init (&loc_ob);
      count = 0;
      do
	{
	  c_parser_consume_token (parser);
	  count++;
	  str.text = (const unsigned char *) TREE_STRING_POINTER (string_tree);
	  str.len = TREE_STRING_LENGTH (string_tree);
	  /* Track the overall kind: a plain string adopts the kind of
	     any prefixed neighbor; two different prefixed kinds cannot
	     be concatenated.  */
	  if (type != tok->type)
	    {
	      if (type == CPP_STRING)
		type = tok->type;
	      else if (tok->type != CPP_STRING)
		error ("unsupported non-standard concatenation "
		       "of string literals");
	    }
	  obstack_grow (&str_ob, &str, sizeof (cpp_string));
	  obstack_grow (&loc_ob, &last_tok_loc, sizeof (location_t));
	  tok = c_parser_peek_token (parser);
	  string_tree = tok->value;
	  last_tok_loc
	    = linemap_resolve_location (line_table, tok->location,
					LRK_MACRO_DEFINITION_LOCATION, NULL);
	}
      while (tok->type == CPP_STRING
	     || tok->type == CPP_WSTRING
	     || tok->type == CPP_STRING16
	     || tok->type == CPP_STRING32
	     || tok->type == CPP_UTF8STRING);
      strs = (cpp_string *) obstack_finish (&str_ob);
    }

  if (count > 1 && !in_system_header_at (input_location))
    warning (OPT_Wtraditional,
	     "traditional C rejects string constant concatenation");

  if ((type == CPP_STRING || wide_ok)
      && ((translate
	   ? cpp_interpret_string : cpp_interpret_string_notranslate)
	  (parse_in, strs, count, &istr, type)))
    {
      value = build_string (istr.len, (const char *) istr.text);
      free (CONST_CAST (unsigned char *, istr.text));
      if (count > 1)
	{
	  /* Record per-piece locations so diagnostics can point into
	     concatenated literals.  */
	  location_t *locs = (location_t *) obstack_finish (&loc_ob);
	  gcc_assert (g_string_concat_db);
	  g_string_concat_db->record_string_concatenation (count, locs);
	}
    }
  else
    {
      if (type != CPP_STRING && !wide_ok)
	{
	  error_at (loc, "a wide string is invalid in this context");
	  type = CPP_STRING;
	}
      /* Callers cannot generally handle error_mark_node in this
	 context, so return the empty string instead.  An error has
	 been issued, either above or from cpp_interpret_string.  */
      switch (type)
	{
	default:
	case CPP_STRING:
	case CPP_UTF8STRING:
	  value = build_string (1, "");
	  break;
	case CPP_STRING16:
	  value = build_string (TYPE_PRECISION (char16_type_node)
				/ TYPE_PRECISION (char_type_node),
				"\0");  /* char16_t is 16 bits */
	  break;
	case CPP_STRING32:
	  value = build_string (TYPE_PRECISION (char32_type_node)
				/ TYPE_PRECISION (char_type_node),
				"\0\0\0");  /* char32_t is 32 bits */
	  break;
	case CPP_WSTRING:
	  value = build_string (TYPE_PRECISION (wchar_type_node)
				/ TYPE_PRECISION (char_type_node),
				"\0\0\0");  /* widest supported wchar_t
					       is 32 bits */
	  break;
	}
    }

  /* Give the result the array type matching the literal's kind.  */
  switch (type)
    {
    default:
    case CPP_STRING:
    case CPP_UTF8STRING:
      TREE_TYPE (value) = char_array_type_node;
      break;
    case CPP_STRING16:
      TREE_TYPE (value) = char16_array_type_node;
      break;
    case CPP_STRING32:
      TREE_TYPE (value) = char32_array_type_node;
      break;
    case CPP_WSTRING:
      TREE_TYPE (value) = wchar_array_type_node;
    }
  value = fix_string_type (value);

  if (count > 1)
    {
      obstack_free (&str_ob, 0);
      obstack_free (&loc_ob, 0);
    }

  ret.value = value;
  ret.original_code = STRING_CST;
  ret.original_type = NULL_TREE;
  set_c_expr_source_range (&ret, get_range_from_loc (line_table, loc));
  parser->seen_string_literal = true;
  return ret;
}
/* Parse an expression other than a compound expression; that is, an
assignment expression (C90 6.3.16, C99 6.5.16, C11 6.5.16). If
AFTER is not NULL then it is an Objective-C message expression which
is the primary-expression starting the expression as an initializer.
assignment-expression:
conditional-expression
unary-expression assignment-operator assignment-expression
assignment-operator: one of
= *= /= %= += -= <<= >>= &= ^= |=
In GNU C we accept any conditional expression on the LHS and
diagnose the invalid lvalue rather than producing a syntax
error. */
static struct c_expr
c_parser_expr_no_commas (c_parser *parser, struct c_expr *after,
			 tree omp_atomic_lhs)
{
  struct c_expr lhs, rhs, ret;
  enum tree_code code;
  location_t op_location, exp_location;
  /* The LHS of an assignment is not an OpenMP loop expression even
     inside an OMP for; restore the flag on every exit path.  */
  bool save_in_omp_for = c_in_omp_for;
  c_in_omp_for = false;
  gcc_assert (!after || c_dialect_objc ());
  lhs = c_parser_conditional_expression (parser, after, omp_atomic_lhs);
  op_location = c_parser_peek_token (parser)->location;
  /* Map the assignment-operator token to the tree code of the
     underlying binary operation (NOP_EXPR for plain "=").  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_EQ:
      code = NOP_EXPR;
      break;
    case CPP_MULT_EQ:
      code = MULT_EXPR;
      break;
    case CPP_DIV_EQ:
      code = TRUNC_DIV_EXPR;
      break;
    case CPP_MOD_EQ:
      code = TRUNC_MOD_EXPR;
      break;
    case CPP_PLUS_EQ:
      code = PLUS_EXPR;
      break;
    case CPP_MINUS_EQ:
      code = MINUS_EXPR;
      break;
    case CPP_LSHIFT_EQ:
      code = LSHIFT_EXPR;
      break;
    case CPP_RSHIFT_EQ:
      code = RSHIFT_EXPR;
      break;
    case CPP_AND_EQ:
      code = BIT_AND_EXPR;
      break;
    case CPP_XOR_EQ:
      code = BIT_XOR_EXPR;
      break;
    case CPP_OR_EQ:
      code = BIT_IOR_EXPR;
      break;
    default:
      /* No assignment operator follows; the conditional expression is
	 the whole result.  */
      c_in_omp_for = save_in_omp_for;
      return lhs;
    }
  c_parser_consume_token (parser);
  exp_location = c_parser_peek_token (parser)->location;
  /* Right recursion gives assignment its right-to-left
     associativity.  */
  rhs = c_parser_expr_no_commas (parser, NULL);
  rhs = convert_lvalue_to_rvalue (exp_location, rhs, true, true);

  ret.value = build_modify_expr (op_location, lhs.value, lhs.original_type,
				 code, exp_location, rhs.value,
				 rhs.original_type);
  set_c_expr_source_range (&ret, lhs.get_start (), rhs.get_finish ());
  if (code == NOP_EXPR)
    ret.original_code = MODIFY_EXPR;
  else
    {
      /* Compound assignment: suppress further warnings on the built
	 expression and don't expose the operation's code.  */
      TREE_NO_WARNING (ret.value) = 1;
      ret.original_code = ERROR_MARK;
    }
  ret.original_type = NULL;
  c_in_omp_for = save_in_omp_for;
  return ret;
}
/* Parse a conditional expression (C90 6.3.15, C99 6.5.15, C11 6.5.15). If
AFTER is not NULL then it is an Objective-C message expression which is
the primary-expression starting the expression as an initializer.
conditional-expression:
logical-OR-expression
logical-OR-expression ? expression : conditional-expression
GNU extensions:
conditional-expression:
logical-OR-expression ? : conditional-expression
*/
static struct c_expr
c_parser_conditional_expression (c_parser *parser, struct c_expr *after,
				 tree omp_atomic_lhs)
{
  struct c_expr cond, exp1, exp2, ret;
  location_t start, cond_loc, colon_loc;
  gcc_assert (!after || c_dialect_objc ());
  /* AFTER and OMP_ATOMIC_LHS only apply to the leftmost operand, so
     they are forwarded to the binary-expression parser and nowhere
     else.  */
  cond = c_parser_binary_expression (parser, after, omp_atomic_lhs);
  /* Without a following '?', this is just a binary expression.  */
  if (c_parser_next_token_is_not (parser, CPP_QUERY))
    return cond;
  if (cond.value != error_mark_node)
    start = cond.get_start ();
  else
    start = UNKNOWN_LOCATION;
  cond_loc = c_parser_peek_token (parser)->location;
  cond = convert_lvalue_to_rvalue (cond_loc, cond, true, true);
  /* Consume the '?'.  */
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* GNU extension "cond ?: exp2": the omitted middle operand is
	 the condition itself, which then must be evaluated only
	 once.  */
      tree eptype = NULL_TREE;
      location_t middle_loc = c_parser_peek_token (parser)->location;
      pedwarn (middle_loc, OPT_Wpedantic,
	       "ISO C forbids omitting the middle term of a %<?:%> expression");
      /* Strip an EXCESS_PRECISION_EXPR wrapper so save_expr applies to
	 the underlying value; the wrapper is re-applied below.  */
      if (TREE_CODE (cond.value) == EXCESS_PRECISION_EXPR)
	{
	  eptype = TREE_TYPE (cond.value);
	  cond.value = TREE_OPERAND (cond.value, 0);
	}
      /* Warn about a suspicious omitted middle operand based on the
	 rightmost operand of any comma expression in the condition.  */
      tree e = cond.value;
      while (TREE_CODE (e) == COMPOUND_EXPR)
	e = TREE_OPERAND (e, 1);
      warn_for_omitted_condop (middle_loc, e);
      /* Make sure first operand is calculated only once. */
      exp1.value = save_expr (default_conversion (cond.value));
      if (eptype)
	exp1.value = build1 (EXCESS_PRECISION_EXPR, eptype, exp1.value);
      exp1.original_type = NULL;
      exp1.src_range = cond.src_range;
      cond.value = c_objc_common_truthvalue_conversion (cond_loc, exp1.value);
      /* If the condition folded to constant true, the third operand is
	 unevaluated: suppress warnings while parsing it.  Undone after
	 EXP2 is parsed, or in the missing-colon error path below.  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_true_node;
    }
  else
    {
      cond.value
	= c_objc_common_truthvalue_conversion
	(cond_loc, default_conversion (cond.value));
      /* If the condition folded to constant false, the second operand
	 is unevaluated: suppress warnings while parsing it...  */
      c_inhibit_evaluation_warnings += cond.value == truthvalue_false_node;
      exp1 = c_parser_expression_conv (parser);
      mark_exp_read (exp1.value);
      /* ...then flip the adjustment so the net effect is +1 iff the
	 condition is constant true (covering the unevaluated third
	 operand), matching the state in the "?:" branch above.  */
      c_inhibit_evaluation_warnings +=
	((cond.value == truthvalue_true_node)
	 - (cond.value == truthvalue_false_node));
    }
  colon_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    {
      /* Balance the suppression added above for a constant-true
	 condition before bailing out.  */
      c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      return ret;
    }
  {
    location_t exp2_loc = c_parser_peek_token (parser)->location;
    exp2 = c_parser_conditional_expression (parser, NULL, NULL_TREE);
    exp2 = convert_lvalue_to_rvalue (exp2_loc, exp2, true, true);
  }
  /* The third operand has been parsed; re-enable warnings.  */
  c_inhibit_evaluation_warnings -= cond.value == truthvalue_true_node;
  location_t loc1 = make_location (exp1.get_start (), exp1.src_range);
  location_t loc2 = make_location (exp2.get_start (), exp2.src_range);
  ret.value = build_conditional_expr (colon_loc, cond.value,
				      cond.original_code == C_MAYBE_CONST_EXPR,
				      exp1.value, exp1.original_type, loc1,
				      exp2.value, exp2.original_type, loc2);
  ret.original_code = ERROR_MARK;
  if (exp1.value == error_mark_node || exp2.value == error_mark_node)
    ret.original_type = NULL;
  else
    {
      tree t1, t2;
      /* If both sides are enum type, the default conversion will have
	 made the type of the result be an integer type. We want to
	 remember the enum types we started with. */
      t1 = exp1.original_type ? exp1.original_type : TREE_TYPE (exp1.value);
      t2 = exp2.original_type ? exp2.original_type : TREE_TYPE (exp2.value);
      ret.original_type = ((t1 != error_mark_node
			    && t2 != error_mark_node
			    && (TYPE_MAIN_VARIANT (t1)
				== TYPE_MAIN_VARIANT (t2)))
			   ? t1
			   : NULL);
    }
  set_c_expr_source_range (&ret, start, exp2.get_finish ());
  return ret;
}
/* Parse a binary expression; that is, a logical-OR-expression (C90
6.3.5-6.3.14, C99 6.5.5-6.5.14, C11 6.5.5-6.5.14). If AFTER is not
NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
OMP_ATOMIC_LHS is NULL, unless parsing OpenMP #pragma omp atomic,
when it should be the unfolded lhs. In a valid OpenMP source,
one of the operands of the toplevel binary expression must be equal
to it. In that case, just return a build2 created binary operation
rather than result of parser_build_binary_op.
multiplicative-expression:
cast-expression
multiplicative-expression * cast-expression
multiplicative-expression / cast-expression
multiplicative-expression % cast-expression
additive-expression:
multiplicative-expression
additive-expression + multiplicative-expression
additive-expression - multiplicative-expression
shift-expression:
additive-expression
shift-expression << additive-expression
shift-expression >> additive-expression
relational-expression:
shift-expression
relational-expression < shift-expression
relational-expression > shift-expression
relational-expression <= shift-expression
relational-expression >= shift-expression
equality-expression:
relational-expression
equality-expression == relational-expression
equality-expression != relational-expression
AND-expression:
equality-expression
AND-expression & equality-expression
exclusive-OR-expression:
AND-expression
exclusive-OR-expression ^ AND-expression
inclusive-OR-expression:
exclusive-OR-expression
inclusive-OR-expression | exclusive-OR-expression
logical-AND-expression:
inclusive-OR-expression
logical-AND-expression && inclusive-OR-expression
logical-OR-expression:
logical-AND-expression
logical-OR-expression || logical-AND-expression
*/
static struct c_expr
c_parser_binary_expression (c_parser *parser, struct c_expr *after,
			    tree omp_atomic_lhs)
{
  /* A binary expression is parsed using operator-precedence parsing,
     with the operands being cast expressions. All the binary
     operators are left-associative. Thus a binary expression is of
     form:
     E0 op1 E1 op2 E2 ...
     which we represent on a stack. On the stack, the precedence
     levels are strictly increasing. When a new operator is
     encountered of higher precedence than that at the top of the
     stack, it is pushed; its LHS is the top expression, and its RHS
     is everything parsed until it is popped. When a new operator is
     encountered with precedence less than or equal to that at the top
     of the stack, triples E[i-1] op[i] E[i] are popped and replaced
     by the result of the operation until the operator at the top of
     the stack has lower precedence than the new operator or there is
     only one element on the stack; then the top expression is the LHS
     of the new operator. In the case of logical AND and OR
     expressions, we also need to adjust c_inhibit_evaluation_warnings
     as appropriate when the operators are pushed and popped. */
  struct {
    /* The expression at this stack level. */
    struct c_expr expr;
    /* The precedence of the operator on its left, PREC_NONE at the
       bottom of the stack. */
    enum c_parser_prec prec;
    /* The operation on its left. */
    enum tree_code op;
    /* The source location of this operation. */
    location_t loc;
    /* The sizeof argument if expr.original_code == SIZEOF_EXPR. */
    tree sizeof_arg;
  } stack[NUM_PRECS];
  int sp;
  /* Location of the binary operator. */
  location_t binary_loc = UNKNOWN_LOCATION; /* Quiet warning. */
  /* POP reduces the top two stack entries into one: it undoes any
     && / || warning suppression added when the operator was pushed,
     emits the sizeof (x) / sizeof (y) pointer-division warning, then
     combines the operands.  For the top-level binary expression of an
     "#pragma omp atomic" statement whose operand matches
     OMP_ATOMIC_LHS, it builds a plain build2 node instead of going
     through parser_build_binary_op.  */
#define POP \
  do { \
    switch (stack[sp].op) \
      { \
      case TRUTH_ANDIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_false_node); \
	break; \
      case TRUTH_ORIF_EXPR: \
	c_inhibit_evaluation_warnings -= (stack[sp - 1].expr.value \
					  == truthvalue_true_node); \
	break; \
      case TRUNC_DIV_EXPR: \
	if (stack[sp - 1].expr.original_code == SIZEOF_EXPR \
	    && stack[sp].expr.original_code == SIZEOF_EXPR) \
	  { \
	    tree type0 = stack[sp - 1].sizeof_arg; \
	    tree type1 = stack[sp].sizeof_arg; \
	    tree first_arg = type0; \
	    if (!TYPE_P (type0)) \
	      type0 = TREE_TYPE (type0); \
	    if (!TYPE_P (type1)) \
	      type1 = TREE_TYPE (type1); \
	    if (POINTER_TYPE_P (type0) \
		&& comptypes (TREE_TYPE (type0), type1) \
		&& !(TREE_CODE (first_arg) == PARM_DECL \
		     && C_ARRAY_PARAMETER (first_arg) \
		     && warn_sizeof_array_argument)) \
	      { \
		auto_diagnostic_group d; \
		if (warning_at (stack[sp].loc, OPT_Wsizeof_pointer_div, \
				"division %<sizeof (%T) / sizeof (%T)%> " \
				"does not compute the number of array " \
				"elements", \
				type0, type1)) \
		  if (DECL_P (first_arg)) \
		    inform (DECL_SOURCE_LOCATION (first_arg), \
			    "first %<sizeof%> operand was declared here"); \
	      } \
	  } \
	break; \
      default: \
	break; \
      } \
    stack[sp - 1].expr \
      = convert_lvalue_to_rvalue (stack[sp - 1].loc, \
				  stack[sp - 1].expr, true, true); \
    stack[sp].expr \
      = convert_lvalue_to_rvalue (stack[sp].loc, \
				  stack[sp].expr, true, true); \
    if (__builtin_expect (omp_atomic_lhs != NULL_TREE, 0) && sp == 1 \
	&& c_parser_peek_token (parser)->type == CPP_SEMICOLON \
	&& ((1 << stack[sp].prec) \
	    & ((1 << PREC_BITOR) | (1 << PREC_BITXOR) | (1 << PREC_BITAND) \
	       | (1 << PREC_SHIFT) | (1 << PREC_ADD) | (1 << PREC_MULT))) \
	&& stack[sp].op != TRUNC_MOD_EXPR \
	&& stack[0].expr.value != error_mark_node \
	&& stack[1].expr.value != error_mark_node \
	&& (c_tree_equal (stack[0].expr.value, omp_atomic_lhs) \
	    || c_tree_equal (stack[1].expr.value, omp_atomic_lhs))) \
      stack[0].expr.value \
	= build2 (stack[1].op, TREE_TYPE (stack[0].expr.value), \
		  stack[0].expr.value, stack[1].expr.value); \
    else \
      stack[sp - 1].expr = parser_build_binary_op (stack[sp].loc, \
						   stack[sp].op, \
						   stack[sp - 1].expr, \
						   stack[sp].expr); \
    sp--; \
  } while (0)
  gcc_assert (!after || c_dialect_objc ());
  /* The leftmost operand seeds the bottom of the stack; AFTER only
     applies to it.  */
  stack[0].loc = c_parser_peek_token (parser)->location;
  stack[0].expr = c_parser_cast_expression (parser, after);
  stack[0].prec = PREC_NONE;
  stack[0].sizeof_arg = c_last_sizeof_arg;
  sp = 0;
  while (true)
    {
      enum c_parser_prec oprec;
      enum tree_code ocode;
      source_range src_range;
      if (parser->error)
	goto out;
      /* Classify the next token as a binary operator; anything else
	 ends the binary expression.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT:
	  oprec = PREC_MULT;
	  ocode = MULT_EXPR;
	  break;
	case CPP_DIV:
	  oprec = PREC_MULT;
	  ocode = TRUNC_DIV_EXPR;
	  break;
	case CPP_MOD:
	  oprec = PREC_MULT;
	  ocode = TRUNC_MOD_EXPR;
	  break;
	case CPP_PLUS:
	  oprec = PREC_ADD;
	  ocode = PLUS_EXPR;
	  break;
	case CPP_MINUS:
	  oprec = PREC_ADD;
	  ocode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT:
	  oprec = PREC_SHIFT;
	  ocode = RSHIFT_EXPR;
	  break;
	case CPP_LESS:
	  oprec = PREC_REL;
	  ocode = LT_EXPR;
	  break;
	case CPP_GREATER:
	  oprec = PREC_REL;
	  ocode = GT_EXPR;
	  break;
	case CPP_LESS_EQ:
	  oprec = PREC_REL;
	  ocode = LE_EXPR;
	  break;
	case CPP_GREATER_EQ:
	  oprec = PREC_REL;
	  ocode = GE_EXPR;
	  break;
	case CPP_EQ_EQ:
	  oprec = PREC_EQ;
	  ocode = EQ_EXPR;
	  break;
	case CPP_NOT_EQ:
	  oprec = PREC_EQ;
	  ocode = NE_EXPR;
	  break;
	case CPP_AND:
	  oprec = PREC_BITAND;
	  ocode = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  oprec = PREC_BITXOR;
	  ocode = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  oprec = PREC_BITOR;
	  ocode = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  oprec = PREC_LOGAND;
	  ocode = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  oprec = PREC_LOGOR;
	  ocode = TRUTH_ORIF_EXPR;
	  break;
	default:
	  /* Not a binary operator, so end of the binary
	     expression. */
	  goto out;
	}
      binary_loc = c_parser_peek_token (parser)->location;
      /* Left associativity: reduce while the operator on top of the
	 stack has precedence >= the new operator.  */
      while (oprec <= stack[sp].prec)
	POP;
      c_parser_consume_token (parser);
      /* For short-circuit operators the LHS becomes a truth value now;
	 if it folded to a constant that short-circuits the RHS,
	 suppress warnings while the (unevaluated) RHS is parsed.  The
	 suppression is undone in POP.  */
      switch (ocode)
	{
	case TRUTH_ANDIF_EXPR:
	  src_range = stack[sp].expr.src_range;
	  stack[sp].expr
	    = convert_lvalue_to_rvalue (stack[sp].loc,
					stack[sp].expr, true, true);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_false_node);
	  set_c_expr_source_range (&stack[sp].expr, src_range);
	  break;
	case TRUTH_ORIF_EXPR:
	  src_range = stack[sp].expr.src_range;
	  stack[sp].expr
	    = convert_lvalue_to_rvalue (stack[sp].loc,
					stack[sp].expr, true, true);
	  stack[sp].expr.value = c_objc_common_truthvalue_conversion
	    (stack[sp].loc, default_conversion (stack[sp].expr.value));
	  c_inhibit_evaluation_warnings += (stack[sp].expr.value
					    == truthvalue_true_node);
	  set_c_expr_source_range (&stack[sp].expr, src_range);
	  break;
	default:
	  break;
	}
      /* Push the new operator and its RHS operand.  */
      sp++;
      stack[sp].loc = binary_loc;
      stack[sp].expr = c_parser_cast_expression (parser, NULL);
      stack[sp].prec = oprec;
      stack[sp].op = ocode;
      stack[sp].sizeof_arg = c_last_sizeof_arg;
    }
 out:
  /* Reduce everything that remains; the result ends up in stack[0].  */
  while (sp > 0)
    POP;
  return stack[0].expr;
#undef POP
}
/* Parse a cast expression (C90 6.3.4, C99 6.5.4, C11 6.5.4). If AFTER
is not NULL then it is an Objective-C message expression which is the
primary-expression starting the expression as an initializer.
cast-expression:
unary-expression
( type-name ) unary-expression
*/
static struct c_expr
c_parser_cast_expression (c_parser *parser, struct c_expr *after)
{
  location_t cast_loc = c_parser_peek_token (parser)->location;
  gcc_assert (!after || c_dialect_objc ());
  if (after)
    return c_parser_postfix_expression_after_primary (parser,
						      cast_loc, *after);
  /* Unless the tokens ahead look like "( type-name", this cannot be a
     cast (or compound literal) and is handled entirely by
     unary-expression parsing.  Full detection of unknown typenames
     here would require a 3-token lookahead.  */
  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      || !c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    return c_parser_unary_expression (parser);

  struct c_expr result;
  matching_parens parens;
  parens.consume_open (parser);
  struct c_type_name *cast_type = c_parser_type_name (parser, true);
  parens.skip_until_found_close (parser);
  if (cast_type == NULL)
    {
      result.set_error ();
      result.original_code = ERROR_MARK;
      result.original_type = NULL;
      return result;
    }
  /* Save casted types in the function's used types hash table.  */
  used_types_insert (cast_type->specs->type);
  /* "( type-name ) {" introduces a compound literal, not a cast.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    return c_parser_postfix_expression_after_paren_type (parser, cast_type,
							 cast_loc);
  if (cast_type->specs->alignas_p)
    error_at (cast_type->specs->locations[cdw_alignas],
	      "alignment specified for type name in cast");
  /* A true cast: parse the operand and build the cast expression.  */
  location_t operand_loc = c_parser_peek_token (parser)->location;
  struct c_expr operand = c_parser_cast_expression (parser, NULL);
  operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
  result.value = c_cast_expr (cast_loc, cast_type, operand.value);
  if (result.value && operand.value)
    set_c_expr_source_range (&result, cast_loc, operand.get_finish ());
  result.original_code = ERROR_MARK;
  result.original_type = NULL;
  return result;
}
/* Parse a unary expression (C90 6.3.3, C99 6.5.3, C11 6.5.3).
unary-expression:
postfix-expression
++ unary-expression
-- unary-expression
unary-operator cast-expression
sizeof unary-expression
sizeof ( type-name )
unary-operator: one of
& * + - ~ !
GNU extensions:
unary-expression:
__alignof__ unary-expression
__alignof__ ( type-name )
&& identifier
(C11 permits _Alignof with type names only.)
unary-operator: one of
__extension__ __real__ __imag__
Transactional Memory:
unary-expression:
transaction-expression
In addition, the GNU syntax treats ++ and -- as unary operators, so
they may be applied to cast expressions with errors for non-lvalues
given later. */
static struct c_expr
c_parser_unary_expression (c_parser *parser)
{
  int ext_state;
  struct c_expr result, operand;
  location_t start_loc = c_parser_peek_token (parser)->location;
  location_t operand_loc;
  location_t finish_loc;
  result.original_code = ERROR_MARK;
  result.original_type = NULL;
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS_PLUS:
      /* Prefix increment.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, PREINCREMENT_EXPR, operand);
    case CPP_MINUS_MINUS:
      /* Prefix decrement.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = default_function_array_read_conversion (operand_loc, operand);
      return parser_build_unary_op (start_loc, PREDECREMENT_EXPR, operand);
    case CPP_AND:
      /* Address-of: the operand is not converted to an rvalue, but it
	 does count as read.  */
      c_parser_consume_token (parser);
      operand = c_parser_cast_expression (parser, NULL);
      mark_exp_read (operand.value);
      return parser_build_unary_op (start_loc, ADDR_EXPR, operand);
    case CPP_MULT:
      {
	/* Indirection.  */
	c_parser_consume_token (parser);
	operand_loc = c_parser_peek_token (parser)->location;
	operand = c_parser_cast_expression (parser, NULL);
	finish_loc = operand.get_finish ();
	operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
	location_t combined_loc = make_location (start_loc, start_loc,
						 finish_loc);
	result.value = build_indirect_ref (combined_loc, operand.value,
					   RO_UNARY_STAR);
	result.src_range.m_start = start_loc;
	result.src_range.m_finish = finish_loc;
	return result;
      }
    case CPP_PLUS:
      /* Unary plus (not accepted by traditional C).  */
      if (!c_dialect_objc () && !in_system_header_at (input_location))
	warning_at (start_loc,
		    OPT_Wtraditional,
		    "traditional C rejects the unary plus operator");
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
      return parser_build_unary_op (start_loc, CONVERT_EXPR, operand);
    case CPP_MINUS:
      /* Negation.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
      return parser_build_unary_op (start_loc, NEGATE_EXPR, operand);
    case CPP_COMPL:
      /* Bitwise complement.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
      return parser_build_unary_op (start_loc, BIT_NOT_EXPR, operand);
    case CPP_NOT:
      /* Logical negation.  */
      c_parser_consume_token (parser);
      operand_loc = c_parser_peek_token (parser)->location;
      operand = c_parser_cast_expression (parser, NULL);
      operand = convert_lvalue_to_rvalue (operand_loc, operand, true, true);
      return parser_build_unary_op (start_loc, TRUTH_NOT_EXPR, operand);
    case CPP_AND_AND:
      /* Refer to the address of a label as a pointer (GNU "&&label"). */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  result.value = finish_label_address_expr
	    (c_parser_peek_token (parser)->value, start_loc);
	  set_c_expr_source_range (&result, start_loc,
				   c_parser_peek_token (parser)->get_finish ());
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_error (parser, "expected identifier");
	  result.set_error ();
	}
      return result;
    case CPP_KEYWORD:
      switch (c_parser_peek_token (parser)->keyword)
	{
	case RID_SIZEOF:
	  return c_parser_sizeof_expression (parser);
	case RID_ALIGNOF:
	  return c_parser_alignof_expression (parser);
	case RID_BUILTIN_HAS_ATTRIBUTE:
	  return c_parser_has_attribute_expression (parser);
	case RID_EXTENSION:
	  /* __extension__: parse the operand with extension
	     diagnostics temporarily disabled.  */
	  c_parser_consume_token (parser);
	  ext_state = disable_extension_diagnostics ();
	  result = c_parser_cast_expression (parser, NULL);
	  restore_extension_diagnostics (ext_state);
	  return result;
	case RID_REALPART:
	  c_parser_consume_token (parser);
	  operand_loc = c_parser_peek_token (parser)->location;
	  operand = c_parser_cast_expression (parser, NULL);
	  operand = default_function_array_conversion (operand_loc, operand);
	  return parser_build_unary_op (start_loc, REALPART_EXPR, operand);
	case RID_IMAGPART:
	  c_parser_consume_token (parser);
	  operand_loc = c_parser_peek_token (parser)->location;
	  operand = c_parser_cast_expression (parser, NULL);
	  operand = default_function_array_conversion (operand_loc, operand);
	  return parser_build_unary_op (start_loc, IMAGPART_EXPR, operand);
	case RID_TRANSACTION_ATOMIC:
	case RID_TRANSACTION_RELAXED:
	  return c_parser_transaction_expression (parser,
	      c_parser_peek_token (parser)->keyword);
	default:
	  return c_parser_postfix_expression (parser);
	}
    default:
      return c_parser_postfix_expression (parser);
    }
}
/* Parse a sizeof expression. */
static struct c_expr
c_parser_sizeof_expression (c_parser *parser)
{
  struct c_expr expr;
  struct c_expr result;
  location_t expr_loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_SIZEOF));
  location_t start;
  location_t finish = UNKNOWN_LOCATION;
  start = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* The operand is parsed in an unevaluated context: suppress
     evaluation warnings and record that we are inside sizeof.  Both
     counters are restored on every path below.  */
  c_inhibit_evaluation_warnings++;
  in_sizeof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either sizeof ( type-name ) or sizeof unary-expression
	 starting with a compound literal. */
      struct c_type_name *type_name;
      matching_parens parens;
      parens.consume_open (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser, true);
      parens.skip_until_found_close (parser);
      /* tokens_buf[0] holds the token just consumed — the ')'.  */
      finish = parser->tokens_buf[0].location;
      if (type_name == NULL)
	{
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_sizeof--;
	  ret.set_error ();
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* "sizeof (T){...}" — a compound literal, handled like
	     sizeof on an expression; jump to the shared tail in the
	     other branch.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       expr_loc);
	  finish = expr.get_finish ();
	  goto sizeof_expr;
	}
      /* sizeof ( type-name ). */
      if (type_name->specs->alignas_p)
	error_at (type_name->specs->locations[cdw_alignas],
		  "alignment specified for type name in %<sizeof%>");
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      result = c_expr_sizeof_type (expr_loc, type_name);
    }
  else
    {
      expr_loc = c_parser_peek_token (parser)->location;
      expr = c_parser_unary_expression (parser);
      finish = expr.get_finish ();
    sizeof_expr:
      /* Shared tail for "sizeof expr"; also reached by goto from the
	 compound-literal case above.  */
      c_inhibit_evaluation_warnings--;
      in_sizeof--;
      mark_exp_read (expr.value);
      /* sizeof may not be applied to a bit-field; diagnose it.  */
      if (TREE_CODE (expr.value) == COMPONENT_REF
	  && DECL_C_BIT_FIELD (TREE_OPERAND (expr.value, 1)))
	error_at (expr_loc, "%<sizeof%> applied to a bit-field");
      result = c_expr_sizeof_expr (expr_loc, expr);
    }
  /* FINISH stays UNKNOWN_LOCATION only if nothing above set it; fall
     back to the location of the sizeof keyword itself.  */
  if (finish == UNKNOWN_LOCATION)
    finish = start;
  set_c_expr_source_range (&result, start, finish);
  return result;
}
/* Parse an alignof expression. */
static struct c_expr
c_parser_alignof_expression (c_parser *parser)
{
  struct c_expr expr;
  location_t start_loc = c_parser_peek_token (parser)->location;
  location_t end_loc;
  tree alignof_spelling = c_parser_peek_token (parser)->value;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_ALIGNOF));
  /* "_Alignof" is the C11 spelling; the GNU spellings map to the same
     keyword but are diagnosed differently below.  */
  bool is_c11_alignof = strcmp (IDENTIFIER_POINTER (alignof_spelling),
				"_Alignof") == 0;
  /* A diagnostic is not required for the use of this identifier in
     the implementation namespace; only diagnose it for the C11
     spelling because of existing code using the other spellings. */
  if (is_c11_alignof)
    {
      if (flag_isoc99)
	pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C99 does not support %qE",
		     alignof_spelling);
      else
	pedwarn_c99 (start_loc, OPT_Wpedantic, "ISO C90 does not support %qE",
		     alignof_spelling);
    }
  c_parser_consume_token (parser);
  /* The operand is parsed in an unevaluated context: suppress
     evaluation warnings and record that we are inside alignof.  Both
     counters are restored on every path below.  */
  c_inhibit_evaluation_warnings++;
  in_alignof++;
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN)
      && c_token_starts_typename (c_parser_peek_2nd_token (parser)))
    {
      /* Either __alignof__ ( type-name ) or __alignof__
	 unary-expression starting with a compound literal. */
      location_t loc;
      struct c_type_name *type_name;
      struct c_expr ret;
      matching_parens parens;
      parens.consume_open (parser);
      loc = c_parser_peek_token (parser)->location;
      type_name = c_parser_type_name (parser, true);
      /* Record the ')' location before it is consumed, for the source
	 range of the result.  */
      end_loc = c_parser_peek_token (parser)->location;
      parens.skip_until_found_close (parser);
      if (type_name == NULL)
	{
	  /* Deliberately shadows the outer RET: this path returns an
	     error expression immediately.  */
	  struct c_expr ret;
	  c_inhibit_evaluation_warnings--;
	  in_alignof--;
	  ret.set_error ();
	  ret.original_code = ERROR_MARK;
	  ret.original_type = NULL;
	  return ret;
	}
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* "__alignof__ (T){...}" — a compound literal, handled like
	     alignof on an expression; jump to the shared tail in the
	     other branch.  */
	  expr = c_parser_postfix_expression_after_paren_type (parser,
							       type_name,
							       loc);
	  goto alignof_expr;
	}
      /* alignof ( type-name ). */
      if (type_name->specs->alignas_p)
	error_at (type_name->specs->locations[cdw_alignas],
		  "alignment specified for type name in %qE",
		  alignof_spelling);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      ret.value = c_sizeof_or_alignof_type (loc, groktypename (type_name,
							       NULL, NULL),
					    false, is_c11_alignof, 1);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      set_c_expr_source_range (&ret, start_loc, end_loc);
      return ret;
    }
  else
    {
      struct c_expr ret;
      expr = c_parser_unary_expression (parser);
      end_loc = expr.src_range.m_finish;
    alignof_expr:
      /* Shared tail for alignof on an expression; also reached by goto
	 from the compound-literal case above.  */
      mark_exp_read (expr.value);
      c_inhibit_evaluation_warnings--;
      in_alignof--;
      /* The C11 spelling only permits a parenthesized type name, so
	 pedwarn about the expression form.  */
      if (is_c11_alignof)
	pedwarn (start_loc,
		 OPT_Wpedantic, "ISO C does not allow %<%E (expression)%>",
		 alignof_spelling);
      ret.value = c_alignof_expr (start_loc, expr.value);
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
      set_c_expr_source_range (&ret, start_loc, end_loc);
      return ret;
    }
}
/* Parse the __builtin_has_attribute ([expr|type], attribute-spec)
expression. */
static struct c_expr
c_parser_has_attribute_expression (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser,
					      RID_BUILTIN_HAS_ATTRIBUTE));
  c_parser_consume_token (parser);

  /* The first operand is not evaluated: suppress evaluation warnings
     while parsing it.  Every path below must undo this increment
     exactly once.  */
  c_inhibit_evaluation_warnings++;

  matching_parens parens;
  if (!parens.require_open (parser))
    {
      c_inhibit_evaluation_warnings--;
      /* in_typeof must NOT be decremented on this path: it is only
	 incremented after the open paren has been consumed (fixes a
	 counter underflow in the original code).  */
      struct c_expr result;
      result.set_error ();
      result.original_code = ERROR_MARK;
      result.original_type = NULL;
      return result;
    }

  /* Treat the type argument the same way as in typeof for the purposes
     of warnings.  FIXME: Generalize this so the warning refers to
     __builtin_has_attribute rather than typeof.  */
  in_typeof++;

  /* The first operand: one of DECL, EXPR, or TYPE.  */
  tree oper = NULL_TREE;
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_id))
    {
      struct c_type_name *tname = c_parser_type_name (parser);
      /* Restore both counters as soon as the operand has been parsed
	 (previously c_inhibit_evaluation_warnings was leaked on this
	 branch, suppressing warnings for the rest of the TU).  */
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (tname)
	{
	  oper = groktypename (tname, NULL, NULL);
	  pop_maybe_used (variably_modified_type_p (oper, NULL_TREE));
	}
    }
  else
    {
      struct c_expr cexpr = c_parser_expr_no_commas (parser, NULL);
      c_inhibit_evaluation_warnings--;
      in_typeof--;
      if (cexpr.value != error_mark_node)
	{
	  mark_exp_read (cexpr.value);
	  oper = cexpr.value;
	  tree etype = TREE_TYPE (oper);
	  bool was_vm = variably_modified_type_p (etype, NULL_TREE);
	  /* This is returned with the type so that when the type is
	     evaluated, this can be evaluated.  */
	  if (was_vm)
	    oper = c_fully_fold (oper, false, NULL);
	  pop_maybe_used (was_vm);
	}
    }

  struct c_expr result;
  result.original_code = ERROR_MARK;
  result.original_type = NULL;

  if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
    {
      /* Consume the closing parenthesis if that's the next token
	 in the likely case the built-in was invoked with fewer
	 than two arguments.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	c_parser_consume_token (parser);
      /* c_inhibit_evaluation_warnings was already restored after the
	 first operand above; decrementing it again here (as the
	 original code did) would underflow the counter.  */
      result.set_error ();
      return result;
    }

  bool save_translate_strings_p = parser->translate_strings_p;

  location_t atloc = c_parser_peek_token (parser)->location;
  /* Parse a single attribute.  Require no leading comma and do not
     allow empty attributes.  */
  tree attr = c_parser_gnu_attribute (parser, NULL_TREE, false, false);

  parser->translate_strings_p = save_translate_strings_p;

  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    c_parser_consume_token (parser);
  else
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
      result.set_error ();
      return result;
    }

  if (!attr)
    {
      error_at (atloc, "expected identifier");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				 "expected %<)%>");
      result.set_error ();
      return result;
    }

  /* The built-in yields a boolean constant: true iff has_attribute
     reports the attribute for the first operand.  */
  result.original_code = INTEGER_CST;
  result.original_type = boolean_type_node;
  if (has_attribute (atloc, oper, attr, default_conversion))
    result.value = boolean_true_node;
  else
    result.value = boolean_false_node;

  return result;
}
/* Helper function to read arguments of builtins which are interfaces
for the middle-end nodes like COMPLEX_EXPR, VEC_PERM_EXPR and
others. The name of the builtin is passed using BNAME parameter.
Function returns true if there were no errors while parsing and
stores the arguments in CEXPR_LIST. If it returns true,
*OUT_CLOSE_PAREN_LOC is written to with the location of the closing
parenthesis. */
static bool
c_parser_get_builtin_args (c_parser *parser, const char *bname,
			   vec<c_expr_t, va_gc> **ret_cexpr_list,
			   bool choose_expr_p,
			   location_t *out_close_paren_loc)
{
  location_t start_loc = c_parser_peek_token (parser)->location;
  vec<c_expr_t, va_gc> *args;
  c_expr_t arg;
  bool saved_folding_p;

  *ret_cexpr_list = NULL;
  /* A built-in name not followed by "(" is an attempt to use it as an
     object; diagnose that.  */
  if (c_parser_next_token_is_not (parser, CPP_OPEN_PAREN))
    {
      error_at (start_loc, "cannot take address of %qs", bname);
      return false;
    }
  c_parser_consume_token (parser);

  /* An empty argument list is reported as success with a NULL list.  */
  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      *out_close_paren_loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      return true;
    }

  /* Only the first argument is parsed with constant folding forced
     (when CHOOSE_EXPR_P); the flag is restored immediately after.  */
  saved_folding_p = force_folding_builtin_constant_p;
  force_folding_builtin_constant_p |= choose_expr_p;
  arg = c_parser_expr_no_commas (parser, NULL);
  force_folding_builtin_constant_p = saved_folding_p;

  vec_alloc (args, 1);
  vec_safe_push (args, arg);
  /* Remaining comma-separated arguments.  */
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      arg = c_parser_expr_no_commas (parser, NULL);
      vec_safe_push (args, arg);
    }

  *out_close_paren_loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
    return false;

  *ret_cexpr_list = args;
  return true;
}
/* One generic-association of a C11 _Generic selection: either
   "type-name : assignment-expression" or
   "default : assignment-expression".  */
struct c_generic_association
{
  /* The location of the starting token of the type. */
  location_t type_location;
  /* The association's type, or NULL_TREE for 'default'. */
  tree type;
  /* The association's expression. */
  struct c_expr expression;
};
/* Parse a generic-selection. (C11 6.5.1.1).
generic-selection:
_Generic ( assignment-expression , generic-assoc-list )
generic-assoc-list:
generic-association
generic-assoc-list , generic-association
generic-association:
type-name : assignment-expression
default : assignment-expression
*/
static struct c_expr
c_parser_generic_selection (c_parser *parser)
{
struct c_expr selector, error_expr;
tree selector_type;
struct c_generic_association matched_assoc;
int match_found = -1;
location_t generic_loc, selector_loc;
error_expr.original_code = ERROR_MARK;
error_expr.original_type = NULL;
error_expr.set_error ();
matched_assoc.type_location = UNKNOWN_LOCATION;
matched_assoc.type = NULL_TREE;
matched_assoc.expression = error_expr;
gcc_assert (c_parser_next_token_is_keyword (parser, RID_GENERIC));
generic_loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
if (flag_isoc99)
pedwarn_c99 (generic_loc, OPT_Wpedantic,
"ISO C99 does not support %<_Generic%>");
else
pedwarn_c99 (generic_loc, OPT_Wpedantic,
"ISO C90 does not support %<_Generic%>");
matching_parens parens;
if (!parens.require_open (parser))
return error_expr;
c_inhibit_evaluation_warnings++;
selector_loc = c_parser_peek_token (parser)->location;
selector = c_parser_expr_no_commas (parser, NULL);
selector = default_function_array_conversion (selector_loc, selector);
c_inhibit_evaluation_warnings--;
if (selector.value == error_mark_node)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return selector;
}
mark_exp_read (selector.value);
selector_type = TREE_TYPE (selector.value);
/* In ISO C terms, rvalues (including the controlling expression of
_Generic) do not have qualified types. */
if (TREE_CODE (selector_type) != ARRAY_TYPE)
selector_type = TYPE_MAIN_VARIANT (selector_type);
/* In ISO C terms, _Noreturn is not part of the type of expressions
such as &abort, but in GCC it is represented internally as a type
qualifier. */
if (FUNCTION_POINTER_TYPE_P (selector_type)
&& TYPE_QUALS (TREE_TYPE (selector_type)) != TYPE_UNQUALIFIED)
selector_type
= build_pointer_type (TYPE_MAIN_VARIANT (TREE_TYPE (selector_type)));
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
auto_vec<c_generic_association> associations;
while (1)
{
struct c_generic_association assoc, *iter;
unsigned int ix;
c_token *token = c_parser_peek_token (parser);
assoc.type_location = token->location;
if (token->type == CPP_KEYWORD && token->keyword == RID_DEFAULT)
{
c_parser_consume_token (parser);
assoc.type = NULL_TREE;
}
else
{
struct c_type_name *type_name;
type_name = c_parser_type_name (parser);
if (type_name == NULL)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
assoc.type = groktypename (type_name, NULL, NULL);
if (assoc.type == error_mark_node)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
if (TREE_CODE (assoc.type) == FUNCTION_TYPE)
error_at (assoc.type_location,
"%<_Generic%> association has function type");
else if (!COMPLETE_TYPE_P (assoc.type))
error_at (assoc.type_location,
"%<_Generic%> association has incomplete type");
if (variably_modified_type_p (assoc.type, NULL_TREE))
error_at (assoc.type_location,
"%<_Generic%> association has "
"variable length type");
}
if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
assoc.expression = c_parser_expr_no_commas (parser, NULL);
if (assoc.expression.value == error_mark_node)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
for (ix = 0; associations.iterate (ix, &iter); ++ix)
{
if (assoc.type == NULL_TREE)
{
if (iter->type == NULL_TREE)
{
error_at (assoc.type_location,
"duplicate %<default%> case in %<_Generic%>");
inform (iter->type_location, "original %<default%> is here");
}
}
else if (iter->type != NULL_TREE)
{
if (comptypes (assoc.type, iter->type))
{
error_at (assoc.type_location,
"%<_Generic%> specifies two compatible types");
inform (iter->type_location, "compatible type is here");
}
}
}
if (assoc.type == NULL_TREE)
{
if (match_found < 0)
{
matched_assoc = assoc;
match_found = associations.length ();
}
}
else if (comptypes (assoc.type, selector_type))
{
if (match_found < 0 || matched_assoc.type == NULL_TREE)
{
matched_assoc = assoc;
match_found = associations.length ();
}
else
{
error_at (assoc.type_location,
"%<_Generic%> selector matches multiple associations");
inform (matched_assoc.type_location,
"other match is here");
}
}
associations.safe_push (assoc);
if (c_parser_peek_token (parser)->type != CPP_COMMA)
break;
c_parser_consume_token (parser);
}
unsigned int ix;
struct c_generic_association *iter;
FOR_EACH_VEC_ELT (associations, ix, iter)
if (ix != (unsigned) match_found)
mark_exp_read (iter->expression.value);
if (!parens.require_close (parser))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
return error_expr;
}
if (match_found < 0)
{
error_at (selector_loc, "%<_Generic%> selector of type %qT is not "
"compatible with any association",
selector_type);
return error_expr;
}
return matched_assoc.expression;
}
/* Check the validity of a function pointer argument *EXPR (argument
   position POS) to __builtin_tgmath.  Return the number of function
   arguments if possibly valid; return 0 having reported an error if
   not valid.  */
static unsigned int
check_tgmath_function (c_expr *expr, unsigned int pos)
{
  /* The argument must be a pointer to a prototyped, non-variadic
     function taking at least one argument.  */
  tree fntype = TREE_TYPE (expr->value);
  if (!FUNCTION_POINTER_TYPE_P (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> is not a function pointer",
		pos);
      return 0;
    }
  fntype = TREE_TYPE (fntype);
  if (!prototype_p (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> is unprototyped", pos);
      return 0;
    }
  if (stdarg_p (fntype))
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> has variable arguments",
		pos);
      return 0;
    }
  /* Count the fixed arguments; the terminating void_type_node entry
     marks the end of the prototype's parameter list.  */
  unsigned int count = 0;
  function_args_iterator args_iter;
  tree parm;
  FOREACH_FUNCTION_ARGS (fntype, parm, args_iter)
    {
      if (parm == void_type_node)
	break;
      count++;
    }
  if (count == 0)
    {
      error_at (expr->get_location (),
		"argument %u of %<__builtin_tgmath%> has no arguments", pos);
      return 0;
    }
  return count;
}
/* Ways in which a parameter or return value of a type-generic macro
   may vary between the different functions the macro may call.  */
enum tgmath_parm_kind
  {
    /* tgmath_fixed: the type is the same in every candidate function.
       tgmath_real: the type varies, but only over real (non-complex)
       types.  tgmath_complex: the type varies and the variation
       includes complex types.  */
    tgmath_fixed, tgmath_real, tgmath_complex
  };
/* Helper for c_parser_postfix_expression: parse one of the predefined
   identifiers __func__, __FUNCTION__ or __PRETTY_FUNCTION__, emitting
   the pedantic diagnostic appropriate to each, and return the
   corresponding fname declaration as an expression.  */
static struct c_expr
c_parser_predefined_identifier (c_parser *parser)
{
  /* Peek once; the token stays valid until it is consumed below.  */
  c_token *tok = c_parser_peek_token (parser);
  location_t loc = tok->location;

  if (tok->keyword == RID_FUNCTION_NAME)
    pedwarn (loc, OPT_Wpedantic, "ISO C does not support %qs predefined "
	     "identifier", "__FUNCTION__");
  else if (tok->keyword == RID_PRETTY_FUNCTION_NAME)
    pedwarn (loc, OPT_Wpedantic, "ISO C does not support %qs predefined "
	     "identifier", "__PRETTY_FUNCTION__");
  else if (tok->keyword == RID_C99_FUNCTION_NAME)
    pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support "
		 "%<__func__%> predefined identifier");
  else
    gcc_unreachable ();

  struct c_expr ret;
  ret.original_code = ERROR_MARK;
  ret.original_type = NULL;
  ret.value = fname_decl (loc, tok->keyword, tok->value);
  set_c_expr_source_range (&ret, loc, loc);
  c_parser_consume_token (parser);
  return ret;
}
/* Parse a postfix expression (C90 6.3.1-6.3.2, C99 6.5.1-6.5.2,
C11 6.5.1-6.5.2). Compound literals aren't handled here; callers have to
call c_parser_postfix_expression_after_paren_type on encountering them.
postfix-expression:
primary-expression
postfix-expression [ expression ]
postfix-expression ( argument-expression-list[opt] )
postfix-expression . identifier
postfix-expression -> identifier
postfix-expression ++
postfix-expression --
( type-name ) { initializer-list }
( type-name ) { initializer-list , }
argument-expression-list:
argument-expression
argument-expression-list , argument-expression
primary-expression:
identifier
constant
string-literal
( expression )
generic-selection
GNU extensions:
primary-expression:
__func__
(treated as a keyword in GNU C)
__FUNCTION__
__PRETTY_FUNCTION__
( compound-statement )
__builtin_va_arg ( assignment-expression , type-name )
__builtin_offsetof ( type-name , offsetof-member-designator )
__builtin_choose_expr ( assignment-expression ,
assignment-expression ,
assignment-expression )
__builtin_types_compatible_p ( type-name , type-name )
__builtin_tgmath ( expr-list )
__builtin_complex ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression , assignment-expression )
__builtin_shuffle ( assignment-expression ,
assignment-expression ,
     assignment-expression )
__builtin_convertvector ( assignment-expression , type-name )
offsetof-member-designator:
identifier
offsetof-member-designator . identifier
offsetof-member-designator [ expression ]
Objective-C:
primary-expression:
[ objc-receiver objc-message-args ]
@selector ( objc-selector-arg )
@protocol ( identifier )
@encode ( type-name )
objc-string-literal
Classname . identifier
*/
static struct c_expr
c_parser_postfix_expression (c_parser *parser)
{
struct c_expr expr, e1;
struct c_type_name *t1, *t2;
location_t loc = c_parser_peek_token (parser)->location;
source_range tok_range = c_parser_peek_token (parser)->get_range ();
expr.original_code = ERROR_MARK;
expr.original_type = NULL;
switch (c_parser_peek_token (parser)->type)
{
case CPP_NUMBER:
expr.value = c_parser_peek_token (parser)->value;
set_c_expr_source_range (&expr, tok_range);
loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
if (TREE_CODE (expr.value) == FIXED_CST
&& !targetm.fixed_point_supported_p ())
{
error_at (loc, "fixed-point types not supported for this target");
expr.set_error ();
}
break;
case CPP_CHAR:
case CPP_CHAR16:
case CPP_CHAR32:
case CPP_UTF8CHAR:
case CPP_WCHAR:
expr.value = c_parser_peek_token (parser)->value;
/* For the purpose of warning when a pointer is compared with
a zero character constant. */
expr.original_type = char_type_node;
set_c_expr_source_range (&expr, tok_range);
c_parser_consume_token (parser);
break;
case CPP_STRING:
case CPP_STRING16:
case CPP_STRING32:
case CPP_WSTRING:
case CPP_UTF8STRING:
expr = c_parser_string_literal (parser, parser->translate_strings_p,
true);
break;
case CPP_OBJC_STRING:
gcc_assert (c_dialect_objc ());
expr.value
= objc_build_string_object (c_parser_peek_token (parser)->value);
set_c_expr_source_range (&expr, tok_range);
c_parser_consume_token (parser);
break;
case CPP_NAME:
switch (c_parser_peek_token (parser)->id_kind)
{
case C_ID_ID:
{
tree id = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
expr.value = build_external_ref (loc, id,
(c_parser_peek_token (parser)->type
== CPP_OPEN_PAREN),
&expr.original_type);
set_c_expr_source_range (&expr, tok_range);
break;
}
case C_ID_CLASSNAME:
{
/* Here we parse the Objective-C 2.0 Class.name dot
syntax. */
tree class_name = c_parser_peek_token (parser)->value;
tree component;
c_parser_consume_token (parser);
gcc_assert (c_dialect_objc ());
if (!c_parser_require (parser, CPP_DOT, "expected %<.%>"))
{
expr.set_error ();
break;
}
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
expr.set_error ();
break;
}
c_token *component_tok = c_parser_peek_token (parser);
component = component_tok->value;
location_t end_loc = component_tok->get_finish ();
c_parser_consume_token (parser);
expr.value = objc_build_class_component_ref (class_name,
component);
set_c_expr_source_range (&expr, loc, end_loc);
break;
}
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
break;
case CPP_OPEN_PAREN:
/* A parenthesized expression, statement expression or compound
literal. */
if (c_parser_peek_2nd_token (parser)->type == CPP_OPEN_BRACE)
{
/* A statement expression. */
tree stmt;
location_t brace_loc;
c_parser_consume_token (parser);
brace_loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
/* If we've not yet started the current function's statement list,
or we're in the parameter scope of an old-style function
declaration, statement expressions are not allowed. */
if (!building_stmt_list_p () || old_style_parameter_scope ())
{
error_at (loc, "braced-group within expression allowed "
"only inside a function");
parser->error = true;
c_parser_skip_until_found (parser, CPP_CLOSE_BRACE, NULL);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
stmt = c_begin_stmt_expr ();
c_parser_compound_statement_nostart (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
pedwarn (loc, OPT_Wpedantic,
"ISO C forbids braced-groups within expressions");
expr.value = c_finish_stmt_expr (brace_loc, stmt);
set_c_expr_source_range (&expr, loc, close_loc);
mark_exp_read (expr.value);
}
else
{
/* A parenthesized expression. */
location_t loc_open_paren = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
expr = c_parser_expression (parser);
if (TREE_CODE (expr.value) == MODIFY_EXPR)
TREE_NO_WARNING (expr.value) = 1;
if (expr.original_code != C_MAYBE_CONST_EXPR
&& expr.original_code != SIZEOF_EXPR)
expr.original_code = ERROR_MARK;
/* Don't change EXPR.ORIGINAL_TYPE. */
location_t loc_close_paren = c_parser_peek_token (parser)->location;
set_c_expr_source_range (&expr, loc_open_paren, loc_close_paren);
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>", loc_open_paren);
}
break;
case CPP_KEYWORD:
switch (c_parser_peek_token (parser)->keyword)
{
case RID_FUNCTION_NAME:
case RID_PRETTY_FUNCTION_NAME:
case RID_C99_FUNCTION_NAME:
expr = c_parser_predefined_identifier (parser);
break;
case RID_VA_ARG:
{
location_t start_loc = loc;
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
e1 = c_parser_expr_no_commas (parser, NULL);
mark_exp_read (e1.value);
e1.value = c_fully_fold (e1.value, false, NULL);
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
loc = c_parser_peek_token (parser)->location;
t1 = c_parser_type_name (parser);
location_t end_loc = c_parser_peek_token (parser)->get_finish ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
if (t1 == NULL)
{
expr.set_error ();
}
else
{
tree type_expr = NULL_TREE;
expr.value = c_build_va_arg (start_loc, e1.value, loc,
groktypename (t1, &type_expr, NULL));
if (type_expr)
{
expr.value = build2 (C_MAYBE_CONST_EXPR,
TREE_TYPE (expr.value), type_expr,
expr.value);
C_MAYBE_CONST_EXPR_NON_CONST (expr.value) = true;
}
set_c_expr_source_range (&expr, start_loc, end_loc);
}
}
break;
case RID_OFFSETOF:
{
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
parser->error = true;
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
gcc_assert (parser->error);
if (parser->error)
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
tree type = groktypename (t1, NULL, NULL);
tree offsetof_ref;
if (type == error_mark_node)
offsetof_ref = error_mark_node;
else
{
offsetof_ref = build1 (INDIRECT_REF, type, null_pointer_node);
SET_EXPR_LOCATION (offsetof_ref, loc);
}
/* Parse the second argument to __builtin_offsetof. We
must have one identifier, and beyond that we want to
accept sub structure and sub array references. */
if (c_parser_next_token_is (parser, CPP_NAME))
{
c_token *comp_tok = c_parser_peek_token (parser);
offsetof_ref = build_component_ref
(loc, offsetof_ref, comp_tok->value, comp_tok->location);
c_parser_consume_token (parser);
while (c_parser_next_token_is (parser, CPP_DOT)
|| c_parser_next_token_is (parser,
CPP_OPEN_SQUARE)
|| c_parser_next_token_is (parser,
CPP_DEREF))
{
if (c_parser_next_token_is (parser, CPP_DEREF))
{
loc = c_parser_peek_token (parser)->location;
offsetof_ref = build_array_ref (loc,
offsetof_ref,
integer_zero_node);
goto do_dot;
}
else if (c_parser_next_token_is (parser, CPP_DOT))
{
do_dot:
c_parser_consume_token (parser);
if (c_parser_next_token_is_not (parser,
CPP_NAME))
{
c_parser_error (parser, "expected identifier");
break;
}
c_token *comp_tok = c_parser_peek_token (parser);
offsetof_ref = build_component_ref
(loc, offsetof_ref, comp_tok->value,
comp_tok->location);
c_parser_consume_token (parser);
}
else
{
struct c_expr ce;
tree idx;
loc = c_parser_peek_token (parser)->location;
c_parser_consume_token (parser);
ce = c_parser_expression (parser);
ce = convert_lvalue_to_rvalue (loc, ce, false, false);
idx = ce.value;
idx = c_fully_fold (idx, false, NULL);
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
offsetof_ref = build_array_ref (loc, offsetof_ref, idx);
}
}
}
else
c_parser_error (parser, "expected identifier");
location_t end_loc = c_parser_peek_token (parser)->get_finish ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
expr.value = fold_offsetof (offsetof_ref);
set_c_expr_source_range (&expr, loc, end_loc);
}
break;
case RID_CHOOSE_EXPR:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p, *e3_p;
tree c;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_choose_expr",
&cexpr_list, true,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 3)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_choose_expr%>");
expr.set_error ();
break;
}
e1_p = &(*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
e3_p = &(*cexpr_list)[2];
c = e1_p->value;
mark_exp_read (e2_p->value);
mark_exp_read (e3_p->value);
if (TREE_CODE (c) != INTEGER_CST
|| !INTEGRAL_TYPE_P (TREE_TYPE (c)))
error_at (loc,
"first argument to %<__builtin_choose_expr%> not"
" a constant");
constant_expression_warning (c);
expr = integer_zerop (c) ? *e3_p : *e2_p;
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_TYPES_COMPATIBLE_P:
{
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
{
expr.set_error ();
break;
}
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
t2 = c_parser_type_name (parser);
if (t2 == NULL)
{
expr.set_error ();
break;
}
location_t close_paren_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
tree e1, e2;
e1 = groktypename (t1, NULL, NULL);
e2 = groktypename (t2, NULL, NULL);
if (e1 == error_mark_node || e2 == error_mark_node)
{
expr.set_error ();
break;
}
e1 = TYPE_MAIN_VARIANT (e1);
e2 = TYPE_MAIN_VARIANT (e2);
expr.value
= comptypes (e1, e2) ? integer_one_node : integer_zero_node;
set_c_expr_source_range (&expr, loc, close_paren_loc);
}
break;
case RID_BUILTIN_TGMATH:
{
vec<c_expr_t, va_gc> *cexpr_list;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_tgmath",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) < 3)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
unsigned int i;
c_expr_t *p;
FOR_EACH_VEC_ELT (*cexpr_list, i, p)
*p = convert_lvalue_to_rvalue (loc, *p, true, true);
unsigned int nargs = check_tgmath_function (&(*cexpr_list)[0], 1);
if (nargs == 0)
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) < nargs)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
unsigned int num_functions = vec_safe_length (cexpr_list) - nargs;
if (num_functions < 2)
{
error_at (loc, "too few arguments to %<__builtin_tgmath%>");
expr.set_error ();
break;
}
/* The first NUM_FUNCTIONS expressions are the function
pointers. The remaining NARGS expressions are the
arguments that are to be passed to one of those
functions, chosen following <tgmath.h> rules. */
for (unsigned int j = 1; j < num_functions; j++)
{
unsigned int this_nargs
= check_tgmath_function (&(*cexpr_list)[j], j + 1);
if (this_nargs == 0)
{
expr.set_error ();
goto out;
}
if (this_nargs != nargs)
{
error_at ((*cexpr_list)[j].get_location (),
"argument %u of %<__builtin_tgmath%> has "
"wrong number of arguments", j + 1);
expr.set_error ();
goto out;
}
}
/* The functions all have the same number of arguments.
Determine whether arguments and return types vary in
ways permitted for <tgmath.h> functions. */
/* The first entry in each of these vectors is for the
return type, subsequent entries for parameter
types. */
auto_vec<enum tgmath_parm_kind> parm_kind (nargs + 1);
auto_vec<tree> parm_first (nargs + 1);
auto_vec<bool> parm_complex (nargs + 1);
auto_vec<bool> parm_varies (nargs + 1);
tree first_type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[0].value));
tree first_ret = TYPE_MAIN_VARIANT (TREE_TYPE (first_type));
parm_first.quick_push (first_ret);
parm_complex.quick_push (TREE_CODE (first_ret) == COMPLEX_TYPE);
parm_varies.quick_push (false);
function_args_iterator iter;
tree t;
unsigned int argpos;
FOREACH_FUNCTION_ARGS (first_type, t, iter)
{
if (t == void_type_node)
break;
parm_first.quick_push (TYPE_MAIN_VARIANT (t));
parm_complex.quick_push (TREE_CODE (t) == COMPLEX_TYPE);
parm_varies.quick_push (false);
}
for (unsigned int j = 1; j < num_functions; j++)
{
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
if (ret != parm_first[0])
{
parm_varies[0] = true;
if (!SCALAR_FLOAT_TYPE_P (parm_first[0])
&& !COMPLEX_FLOAT_TYPE_P (parm_first[0]))
{
error_at ((*cexpr_list)[0].get_location (),
"invalid type-generic return type for "
"argument %u of %<__builtin_tgmath%>",
1);
expr.set_error ();
goto out;
}
if (!SCALAR_FLOAT_TYPE_P (ret)
&& !COMPLEX_FLOAT_TYPE_P (ret))
{
error_at ((*cexpr_list)[j].get_location (),
"invalid type-generic return type for "
"argument %u of %<__builtin_tgmath%>",
j + 1);
expr.set_error ();
goto out;
}
}
if (TREE_CODE (ret) == COMPLEX_TYPE)
parm_complex[0] = true;
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (t == void_type_node)
break;
t = TYPE_MAIN_VARIANT (t);
if (t != parm_first[argpos])
{
parm_varies[argpos] = true;
if (!SCALAR_FLOAT_TYPE_P (parm_first[argpos])
&& !COMPLEX_FLOAT_TYPE_P (parm_first[argpos]))
{
error_at ((*cexpr_list)[0].get_location (),
"invalid type-generic type for "
"argument %u of argument %u of "
"%<__builtin_tgmath%>", argpos, 1);
expr.set_error ();
goto out;
}
if (!SCALAR_FLOAT_TYPE_P (t)
&& !COMPLEX_FLOAT_TYPE_P (t))
{
error_at ((*cexpr_list)[j].get_location (),
"invalid type-generic type for "
"argument %u of argument %u of "
"%<__builtin_tgmath%>", argpos, j + 1);
expr.set_error ();
goto out;
}
}
if (TREE_CODE (t) == COMPLEX_TYPE)
parm_complex[argpos] = true;
argpos++;
}
}
enum tgmath_parm_kind max_variation = tgmath_fixed;
for (unsigned int j = 0; j <= nargs; j++)
{
enum tgmath_parm_kind this_kind;
if (parm_varies[j])
{
if (parm_complex[j])
max_variation = this_kind = tgmath_complex;
else
{
this_kind = tgmath_real;
if (max_variation != tgmath_complex)
max_variation = tgmath_real;
}
}
else
this_kind = tgmath_fixed;
parm_kind.quick_push (this_kind);
}
if (max_variation == tgmath_fixed)
{
error_at (loc, "function arguments of %<__builtin_tgmath%> "
"all have the same type");
expr.set_error ();
break;
}
/* Identify a parameter (not the return type) that varies,
including with complex types if any variation includes
complex types; there must be at least one such
parameter. */
unsigned int tgarg = 0;
for (unsigned int j = 1; j <= nargs; j++)
if (parm_kind[j] == max_variation)
{
tgarg = j;
break;
}
if (tgarg == 0)
{
error_at (loc, "function arguments of %<__builtin_tgmath%> "
"lack type-generic parameter");
expr.set_error ();
break;
}
/* Determine the type of the relevant parameter for each
function. */
auto_vec<tree> tg_type (num_functions);
for (unsigned int j = 0; j < num_functions; j++)
{
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (argpos == tgarg)
{
tg_type.quick_push (TYPE_MAIN_VARIANT (t));
break;
}
argpos++;
}
}
/* Verify that the corresponding types are different for
all the listed functions. Also determine whether all
the types are complex, whether all the types are
standard or binary, and whether all the types are
decimal. */
bool all_complex = true;
bool all_binary = true;
bool all_decimal = true;
hash_set<tree> tg_types;
FOR_EACH_VEC_ELT (tg_type, i, t)
{
if (TREE_CODE (t) == COMPLEX_TYPE)
all_decimal = false;
else
{
all_complex = false;
if (DECIMAL_FLOAT_TYPE_P (t))
all_binary = false;
else
all_decimal = false;
}
if (tg_types.add (t))
{
error_at ((*cexpr_list)[i].get_location (),
"duplicate type-generic parameter type for "
"function argument %u of %<__builtin_tgmath%>",
i + 1);
expr.set_error ();
goto out;
}
}
/* Verify that other parameters and the return type whose
types vary have their types varying in the correct
way. */
for (unsigned int j = 0; j < num_functions; j++)
{
tree exp_type = tg_type[j];
tree exp_real_type = exp_type;
if (TREE_CODE (exp_type) == COMPLEX_TYPE)
exp_real_type = TREE_TYPE (exp_type);
tree type = TREE_TYPE (TREE_TYPE ((*cexpr_list)[j].value));
tree ret = TYPE_MAIN_VARIANT (TREE_TYPE (type));
if ((parm_kind[0] == tgmath_complex && ret != exp_type)
|| (parm_kind[0] == tgmath_real && ret != exp_real_type))
{
error_at ((*cexpr_list)[j].get_location (),
"bad return type for function argument %u "
"of %<__builtin_tgmath%>", j + 1);
expr.set_error ();
goto out;
}
argpos = 1;
FOREACH_FUNCTION_ARGS (type, t, iter)
{
if (t == void_type_node)
break;
t = TYPE_MAIN_VARIANT (t);
if ((parm_kind[argpos] == tgmath_complex
&& t != exp_type)
|| (parm_kind[argpos] == tgmath_real
&& t != exp_real_type))
{
error_at ((*cexpr_list)[j].get_location (),
"bad type for argument %u of "
"function argument %u of "
"%<__builtin_tgmath%>", argpos, j + 1);
expr.set_error ();
goto out;
}
argpos++;
}
}
/* The functions listed are a valid set of functions for a
<tgmath.h> macro to select between. Identify the
matching function, if any. First, the argument types
must be combined following <tgmath.h> rules. Integer
types are treated as _Decimal64 if any type-generic
argument is decimal, or if the only alternatives for
type-generic arguments are of decimal types, and are
otherwise treated as double (or _Complex double for
complex integer types, or _Float64 or _Complex _Float64
if all the return types are the same _FloatN or
_FloatNx type). After that adjustment, types are
combined following the usual arithmetic conversions.
If the function only accepts complex arguments, a
complex type is produced. */
bool arg_complex = all_complex;
bool arg_binary = all_binary;
bool arg_int_decimal = all_decimal;
for (unsigned int j = 1; j <= nargs; j++)
{
if (parm_kind[j] == tgmath_fixed)
continue;
c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
tree type = TREE_TYPE (ce->value);
if (!INTEGRAL_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type)
&& TREE_CODE (type) != COMPLEX_TYPE)
{
error_at (ce->get_location (),
"invalid type of argument %u of type-generic "
"function", j);
expr.set_error ();
goto out;
}
if (DECIMAL_FLOAT_TYPE_P (type))
{
arg_int_decimal = true;
if (all_complex)
{
error_at (ce->get_location (),
"decimal floating-point argument %u to "
"complex-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (all_binary)
{
error_at (ce->get_location (),
"decimal floating-point argument %u to "
"binary-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_complex)
{
error_at (ce->get_location (),
"both complex and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
else if (arg_binary)
{
error_at (ce->get_location (),
"both binary and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
else if (TREE_CODE (type) == COMPLEX_TYPE)
{
arg_complex = true;
if (COMPLEX_FLOAT_TYPE_P (type))
arg_binary = true;
if (all_decimal)
{
error_at (ce->get_location (),
"complex argument %u to "
"decimal-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_int_decimal)
{
error_at (ce->get_location (),
"both complex and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
else if (SCALAR_FLOAT_TYPE_P (type))
{
arg_binary = true;
if (all_decimal)
{
error_at (ce->get_location (),
"binary argument %u to "
"decimal-only type-generic function", j);
expr.set_error ();
goto out;
}
else if (arg_int_decimal)
{
error_at (ce->get_location (),
"both binary and decimal floating-point "
"arguments to type-generic function");
expr.set_error ();
goto out;
}
}
}
/* For a macro rounding its result to a narrower type, map
integer types to _Float64 not double if the return type
is a _FloatN or _FloatNx type. */
bool arg_int_float64 = false;
if (parm_kind[0] == tgmath_fixed
&& SCALAR_FLOAT_TYPE_P (parm_first[0])
&& float64_type_node != NULL_TREE)
for (unsigned int j = 0; j < NUM_FLOATN_NX_TYPES; j++)
if (parm_first[0] == FLOATN_TYPE_NODE (j))
{
arg_int_float64 = true;
break;
}
tree arg_real = NULL_TREE;
for (unsigned int j = 1; j <= nargs; j++)
{
if (parm_kind[j] == tgmath_fixed)
continue;
c_expr_t *ce = &(*cexpr_list)[num_functions + j - 1];
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (ce->value));
if (TREE_CODE (type) == COMPLEX_TYPE)
type = TREE_TYPE (type);
if (INTEGRAL_TYPE_P (type))
type = (arg_int_decimal
? dfloat64_type_node
: arg_int_float64
? float64_type_node
: double_type_node);
if (arg_real == NULL_TREE)
arg_real = type;
else
arg_real = common_type (arg_real, type);
if (arg_real == error_mark_node)
{
expr.set_error ();
goto out;
}
}
tree arg_type = (arg_complex
? build_complex_type (arg_real)
: arg_real);
/* Look for a function to call with type-generic parameter
type ARG_TYPE. */
c_expr_t *fn = NULL;
for (unsigned int j = 0; j < num_functions; j++)
{
if (tg_type[j] == arg_type)
{
fn = &(*cexpr_list)[j];
break;
}
}
if (fn == NULL
&& parm_kind[0] == tgmath_fixed
&& SCALAR_FLOAT_TYPE_P (parm_first[0]))
{
/* Presume this is a macro that rounds its result to a
narrower type, and look for the first function with
at least the range and precision of the argument
type. */
for (unsigned int j = 0; j < num_functions; j++)
{
if (arg_complex
!= (TREE_CODE (tg_type[j]) == COMPLEX_TYPE))
continue;
tree real_tg_type = (arg_complex
? TREE_TYPE (tg_type[j])
: tg_type[j]);
if (DECIMAL_FLOAT_TYPE_P (arg_real)
!= DECIMAL_FLOAT_TYPE_P (real_tg_type))
continue;
scalar_float_mode arg_mode
= SCALAR_FLOAT_TYPE_MODE (arg_real);
scalar_float_mode tg_mode
= SCALAR_FLOAT_TYPE_MODE (real_tg_type);
const real_format *arg_fmt = REAL_MODE_FORMAT (arg_mode);
const real_format *tg_fmt = REAL_MODE_FORMAT (tg_mode);
if (arg_fmt->b == tg_fmt->b
&& arg_fmt->p <= tg_fmt->p
&& arg_fmt->emax <= tg_fmt->emax
&& (arg_fmt->emin - arg_fmt->p
>= tg_fmt->emin - tg_fmt->p))
{
fn = &(*cexpr_list)[j];
break;
}
}
}
if (fn == NULL)
{
error_at (loc, "no matching function for type-generic call");
expr.set_error ();
break;
}
/* Construct a call to FN. */
vec<tree, va_gc> *args;
vec_alloc (args, nargs);
vec<tree, va_gc> *origtypes;
vec_alloc (origtypes, nargs);
auto_vec<location_t> arg_loc (nargs);
for (unsigned int j = 0; j < nargs; j++)
{
c_expr_t *ce = &(*cexpr_list)[num_functions + j];
args->quick_push (ce->value);
arg_loc.quick_push (ce->get_location ());
origtypes->quick_push (ce->original_type);
}
expr.value = c_build_function_call_vec (loc, arg_loc, fn->value,
args, origtypes);
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_CALL_WITH_STATIC_CHAIN:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e2_p;
tree chain_value;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_call_with_static_chain",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 2)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_call_with_static_chain%>");
expr.set_error ();
break;
}
expr = (*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
*e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
chain_value = e2_p->value;
mark_exp_read (chain_value);
if (TREE_CODE (expr.value) != CALL_EXPR)
error_at (loc, "first argument to "
"%<__builtin_call_with_static_chain%> "
"must be a call expression");
else if (TREE_CODE (TREE_TYPE (chain_value)) != POINTER_TYPE)
error_at (loc, "second argument to "
"%<__builtin_call_with_static_chain%> "
"must be a pointer type");
else
CALL_EXPR_STATIC_CHAIN (expr.value) = chain_value;
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_COMPLEX:
{
vec<c_expr_t, va_gc> *cexpr_list;
c_expr_t *e1_p, *e2_p;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_complex",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
if (vec_safe_length (cexpr_list) != 2)
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_complex%>");
expr.set_error ();
break;
}
e1_p = &(*cexpr_list)[0];
e2_p = &(*cexpr_list)[1];
*e1_p = convert_lvalue_to_rvalue (loc, *e1_p, true, true);
if (TREE_CODE (e1_p->value) == EXCESS_PRECISION_EXPR)
e1_p->value = convert (TREE_TYPE (e1_p->value),
TREE_OPERAND (e1_p->value, 0));
*e2_p = convert_lvalue_to_rvalue (loc, *e2_p, true, true);
if (TREE_CODE (e2_p->value) == EXCESS_PRECISION_EXPR)
e2_p->value = convert (TREE_TYPE (e2_p->value),
TREE_OPERAND (e2_p->value, 0));
if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e1_p->value))
|| !SCALAR_FLOAT_TYPE_P (TREE_TYPE (e2_p->value))
|| DECIMAL_FLOAT_TYPE_P (TREE_TYPE (e2_p->value)))
{
error_at (loc, "%<__builtin_complex%> operand "
"not of real binary floating-point type");
expr.set_error ();
break;
}
if (TYPE_MAIN_VARIANT (TREE_TYPE (e1_p->value))
!= TYPE_MAIN_VARIANT (TREE_TYPE (e2_p->value)))
{
error_at (loc,
"%<__builtin_complex%> operands of different types");
expr.set_error ();
break;
}
pedwarn_c90 (loc, OPT_Wpedantic,
"ISO C90 does not support complex types");
expr.value = build2_loc (loc, COMPLEX_EXPR,
build_complex_type
(TYPE_MAIN_VARIANT
(TREE_TYPE (e1_p->value))),
e1_p->value, e2_p->value);
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_SHUFFLE:
{
vec<c_expr_t, va_gc> *cexpr_list;
unsigned int i;
c_expr_t *p;
location_t close_paren_loc;
c_parser_consume_token (parser);
if (!c_parser_get_builtin_args (parser,
"__builtin_shuffle",
&cexpr_list, false,
&close_paren_loc))
{
expr.set_error ();
break;
}
FOR_EACH_VEC_SAFE_ELT (cexpr_list, i, p)
*p = convert_lvalue_to_rvalue (loc, *p, true, true);
if (vec_safe_length (cexpr_list) == 2)
expr.value = c_build_vec_perm_expr (loc, (*cexpr_list)[0].value,
NULL_TREE,
(*cexpr_list)[1].value);
else if (vec_safe_length (cexpr_list) == 3)
expr.value = c_build_vec_perm_expr (loc, (*cexpr_list)[0].value,
(*cexpr_list)[1].value,
(*cexpr_list)[2].value);
else
{
error_at (loc, "wrong number of arguments to "
"%<__builtin_shuffle%>");
expr.set_error ();
}
set_c_expr_source_range (&expr, loc, close_paren_loc);
break;
}
case RID_BUILTIN_CONVERTVECTOR:
{
location_t start_loc = loc;
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
e1 = c_parser_expr_no_commas (parser, NULL);
mark_exp_read (e1.value);
if (!c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
{
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
loc = c_parser_peek_token (parser)->location;
t1 = c_parser_type_name (parser);
location_t end_loc = c_parser_peek_token (parser)->get_finish ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
"expected %<)%>");
if (t1 == NULL)
expr.set_error ();
else
{
tree type_expr = NULL_TREE;
expr.value = c_build_vec_convert (start_loc, e1.value, loc,
groktypename (t1, &type_expr,
NULL));
set_c_expr_source_range (&expr, start_loc, end_loc);
}
}
break;
case RID_AT_SELECTOR:
{
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
tree sel = c_parser_objc_selector_arg (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
expr.value = objc_build_selector_expr (loc, sel);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_AT_PROTOCOL:
{
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
if (c_parser_next_token_is_not (parser, CPP_NAME))
{
c_parser_error (parser, "expected identifier");
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
expr.set_error ();
break;
}
tree id = c_parser_peek_token (parser)->value;
c_parser_consume_token (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
expr.value = objc_build_protocol_expr (id);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_AT_ENCODE:
{
/* Extension to support C-structures in the archiver. */
gcc_assert (c_dialect_objc ());
c_parser_consume_token (parser);
matching_parens parens;
if (!parens.require_open (parser))
{
expr.set_error ();
break;
}
t1 = c_parser_type_name (parser);
if (t1 == NULL)
{
expr.set_error ();
c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
break;
}
location_t close_loc = c_parser_peek_token (parser)->location;
parens.skip_until_found_close (parser);
tree type = groktypename (t1, NULL, NULL);
expr.value = objc_build_encode_expr (type);
set_c_expr_source_range (&expr, loc, close_loc);
}
break;
case RID_GENERIC:
expr = c_parser_generic_selection (parser);
break;
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
break;
case CPP_OPEN_SQUARE:
if (c_dialect_objc ())
{
tree receiver, args;
c_parser_consume_token (parser);
receiver = c_parser_objc_receiver (parser);
args = c_parser_objc_message_args (parser);
location_t close_loc = c_parser_peek_token (parser)->location;
c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
"expected %<]%>");
expr.value = objc_build_message_expr (receiver, args);
set_c_expr_source_range (&expr, loc, close_loc);
break;
}
/* Else fall through to report error. */
/* FALLTHRU */
default:
c_parser_error (parser, "expected expression");
expr.set_error ();
break;
}
out:
return c_parser_postfix_expression_after_primary
(parser, EXPR_LOC_OR_LOC (expr.value, loc), expr);
}
/* Parse a postfix expression after a parenthesized type name: the
brace-enclosed initializer of a compound literal, possibly followed
by some postfix operators. This is separate because it is not
possible to tell until after the type name whether a cast
expression has a cast or a compound literal, or whether the operand
of sizeof is a parenthesized type name or starts with a compound
literal. TYPE_LOC is the location where TYPE_NAME starts--the
location of the first token after the parentheses around the type
name. */
static struct c_expr
c_parser_postfix_expression_after_paren_type (c_parser *parser,
					      struct c_type_name *type_name,
					      location_t type_loc)
{
  tree type;
  struct c_expr init;
  bool non_const;
  struct c_expr expr;
  location_t start_loc;
  tree type_expr = NULL_TREE;
  bool type_expr_const = true;
  /* Diagnose disallowed compound-literal types before grokking the
     type name.  */
  check_compound_literal_type (type_loc, type_name);
  rich_location richloc (line_table, type_loc);
  /* Initializer parsing state must be set up before groktypename so
     the braced initializer below is processed in the right context.  */
  start_init (NULL_TREE, NULL, 0, &richloc);
  type = groktypename (type_name, &type_expr, &type_expr_const);
  start_loc = c_parser_peek_token (parser)->location;
  if (type != error_mark_node && C_TYPE_VARIABLE_SIZE (type))
    {
      error_at (type_loc, "compound literal has variable size");
      type = error_mark_node;
    }
  init = c_parser_braced_init (parser, type, false, NULL);
  finish_init ();
  maybe_warn_string_init (type_loc, type, init);
  /* Address-space-qualified compound literals are rejected inside
     functions.  */
  if (type != error_mark_node
      && !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (type))
      && current_function_decl)
    {
      error ("compound literal qualified by address-space qualifier");
      type = error_mark_node;
    }
  pedwarn_c90 (start_loc, OPT_Wpedantic, "ISO C90 forbids compound literals");
  /* The literal is non-constant if its initializer is, or if the type
     name itself required evaluation (e.g. typeof with side effects).  */
  non_const = ((init.value && TREE_CODE (init.value) == CONSTRUCTOR)
	       ? CONSTRUCTOR_NON_CONST (init.value)
	       : init.original_code == C_MAYBE_CONST_EXPR);
  non_const |= !type_expr_const;
  unsigned int alignas_align = 0;
  if (type != error_mark_node
      && type_name->specs->align_log != -1)
    {
      alignas_align = 1U << type_name->specs->align_log;
      /* _Alignas may only increase alignment, never reduce it.  */
      if (alignas_align < min_align_of_type (type))
	{
	  error_at (type_name->specs->locations[cdw_alignas],
		    "%<_Alignas%> specifiers cannot reduce "
		    "alignment of compound literal");
	  alignas_align = 0;
	}
    }
  expr.value = build_compound_literal (start_loc, type, init.value, non_const,
				       alignas_align);
  set_c_expr_source_range (&expr, init.src_range);
  expr.original_code = ERROR_MARK;
  expr.original_type = NULL;
  if (type != error_mark_node
      && expr.value != error_mark_node
      && type_expr)
    {
      /* Attach the type's side-effect expression so it is evaluated
         exactly once, before the literal itself.  */
      if (TREE_CODE (expr.value) == C_MAYBE_CONST_EXPR)
	{
	  gcc_assert (C_MAYBE_CONST_EXPR_PRE (expr.value) == NULL_TREE);
	  C_MAYBE_CONST_EXPR_PRE (expr.value) = type_expr;
	}
      else
	{
	  gcc_assert (!non_const);
	  expr.value = build2 (C_MAYBE_CONST_EXPR, type,
			       type_expr, expr.value);
	}
    }
  return c_parser_postfix_expression_after_primary (parser, start_loc, expr);
}
/* Callback function for sizeof_pointer_memaccess_warning to compare
types. */
static bool
sizeof_ptr_memacc_comptypes (tree type1, tree type2)
{
  /* Treat the types as matching only when comptypes reports full
     compatibility (return value 1).  */
  const int compat = comptypes (type1, type2);
  return compat == 1;
}
/* Warn for patterns where abs-like function appears to be used incorrectly,
gracefully ignore any non-abs-like function. The warning location should
be LOC. FNDECL is the declaration of called function, it must be a
BUILT_IN_NORMAL function. ARG is the first and only argument of the
call. */
static void
warn_for_abs (location_t loc, tree fndecl, tree arg)
{
  /* Avoid warning in unreachable subexpressions.  */
  if (c_inhibit_evaluation_warnings)
    return;

  tree atype = TREE_TYPE (arg);

  /* Casts from pointers (and thus arrays and fndecls) will generate
     -Wint-conversion warnings.  Most other wrong types hopefully lead to type
     mismatch errors.  TODO: Think about what to do with FIXED_POINT_TYPE_P
     types and possibly other exotic types.  */
  if (!INTEGRAL_TYPE_P (atype)
      && !SCALAR_FLOAT_TYPE_P (atype)
      && TREE_CODE (atype) != COMPLEX_TYPE)
    return;

  enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
  switch (fcode)
    {
    case BUILT_IN_ABS:
    case BUILT_IN_LABS:
    case BUILT_IN_LLABS:
    case BUILT_IN_IMAXABS:
      /* Integer abs family applied to a non-integer argument.  */
      if (!INTEGRAL_TYPE_P (atype))
	{
	  if (SCALAR_FLOAT_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using integer absolute value function %qD when "
			"argument is of floating-point type %qT",
			fndecl, atype);
	  else if (TREE_CODE (atype) == COMPLEX_TYPE)
	    warning_at (loc, OPT_Wabsolute_value,
			"using integer absolute value function %qD when "
			"argument is of complex type %qT", fndecl, atype);
	  else
	    gcc_unreachable ();
	  return;
	}
      /* abs of an unsigned value is the identity, so the call is
	 almost certainly a mistake.  */
      if (TYPE_UNSIGNED (atype))
	warning_at (loc, OPT_Wabsolute_value,
		    "taking the absolute value of unsigned type %qT "
		    "has no effect", atype);
      break;
    CASE_FLT_FN (BUILT_IN_FABS):
    CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS):
      /* Binary floating-point fabs family applied to an argument that
	 is not binary floating-point.  */
      if (!SCALAR_FLOAT_TYPE_P (atype)
	  || DECIMAL_FLOAT_MODE_P (TYPE_MODE (atype)))
	{
	  if (INTEGRAL_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using floating-point absolute value function %qD "
			"when argument is of integer type %qT", fndecl, atype);
	  else if (DECIMAL_FLOAT_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using floating-point absolute value function %qD "
			"when argument is of decimal floating-point type %qT",
			fndecl, atype);
	  else if (TREE_CODE (atype) == COMPLEX_TYPE)
	    warning_at (loc, OPT_Wabsolute_value,
			"using floating-point absolute value function %qD when "
			"argument is of complex type %qT", fndecl, atype);
	  else
	    gcc_unreachable ();
	  return;
	}
      break;
    CASE_FLT_FN (BUILT_IN_CABS):
      /* Complex cabs family applied to a non-complex argument.  */
      if (TREE_CODE (atype) != COMPLEX_TYPE)
	{
	  if (INTEGRAL_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using complex absolute value function %qD when "
			"argument is of integer type %qT", fndecl, atype);
	  else if (SCALAR_FLOAT_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using complex absolute value function %qD when "
			"argument is of floating-point type %qT",
			fndecl, atype);
	  else
	    gcc_unreachable ();
	  return;
	}
      break;
    case BUILT_IN_FABSD32:
    case BUILT_IN_FABSD64:
    case BUILT_IN_FABSD128:
      /* Decimal floating-point fabs family applied to an argument that
	 is not decimal floating-point.  */
      if (!DECIMAL_FLOAT_TYPE_P (atype))
	{
	  if (INTEGRAL_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using decimal floating-point absolute value "
			"function %qD when argument is of integer type %qT",
			fndecl, atype);
	  else if (SCALAR_FLOAT_TYPE_P (atype))
	    warning_at (loc, OPT_Wabsolute_value,
			"using decimal floating-point absolute value "
			"function %qD when argument is of floating-point "
			"type %qT", fndecl, atype);
	  else if (TREE_CODE (atype) == COMPLEX_TYPE)
	    warning_at (loc, OPT_Wabsolute_value,
			"using decimal floating-point absolute value "
			"function %qD when argument is of complex type %qT",
			fndecl, atype);
	  else
	    gcc_unreachable ();
	  return;
	}
      break;
    default:
      /* Not an abs-like builtin; gracefully ignore.  */
      return;
    }

  /* Past this point the kind of abs matched the argument kind; also
     warn when the parameter type is narrower than the argument type,
     which would silently truncate the value.  */
  if (!TYPE_ARG_TYPES (TREE_TYPE (fndecl)))
    return;

  tree ftype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
  if (TREE_CODE (atype) == COMPLEX_TYPE)
    {
      /* Compare the component types of complex arguments.  */
      gcc_assert (TREE_CODE (ftype) == COMPLEX_TYPE);
      atype = TREE_TYPE (atype);
      ftype = TREE_TYPE (ftype);
    }

  if (TYPE_PRECISION (ftype) < TYPE_PRECISION (atype))
    warning_at (loc, OPT_Wabsolute_value,
		"absolute value function %qD given an argument of type %qT "
		"but has parameter of type %qT which may cause truncation "
		"of value", fndecl, atype, ftype);
}
/* Parse a postfix expression after the initial primary or compound
literal; that is, parse a series of postfix operators.
EXPR_LOC is the location of the primary expression. */
static struct c_expr
c_parser_postfix_expression_after_primary (c_parser *parser,
					   location_t expr_loc,
					   struct c_expr expr)
{
  struct c_expr orig_expr;
  tree ident, idx;
  location_t sizeof_arg_loc[3], comp_loc;
  tree sizeof_arg[3];
  unsigned int literal_zero_mask;
  unsigned int i;
  vec<tree, va_gc> *exprlist;
  vec<tree, va_gc> *origtypes = NULL;
  vec<location_t> arg_loc = vNULL;
  location_t start;
  location_t finish;
  /* Keep absorbing postfix operators until the next token does not
     start one; the default case below returns the accumulated
     expression.  */
  while (true)
    {
      location_t op_loc = c_parser_peek_token (parser)->location;
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_OPEN_SQUARE:
	  /* Array reference.  */
	  c_parser_consume_token (parser);
	  idx = c_parser_expression (parser).value;
	  c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE,
				     "expected %<]%>");
	  start = expr.get_start ();
	  /* tokens_buf[0] is the token just consumed (the ']').  */
	  finish = parser->tokens_buf[0].location;
	  expr.value = build_array_ref (op_loc, expr.value, idx);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_OPEN_PAREN:
	  /* Function call.  */
	  {
	    matching_parens parens;
	    parens.consume_open (parser);
	    /* sizeof_arg[] records up to three arguments that were
	       sizeof expressions, for -Wsizeof-pointer-memaccess.  */
	    for (i = 0; i < 3; i++)
	      {
		sizeof_arg[i] = NULL_TREE;
		sizeof_arg_loc[i] = UNKNOWN_LOCATION;
	      }
	    literal_zero_mask = 0;
	    if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	      exprlist = NULL;
	    else
	      exprlist = c_parser_expr_list (parser, true, false, &origtypes,
					     sizeof_arg_loc, sizeof_arg,
					     &arg_loc, &literal_zero_mask);
	    parens.skip_until_found_close (parser);
	  }
	  orig_expr = expr;
	  mark_exp_read (expr.value);
	  if (warn_sizeof_pointer_memaccess)
	    sizeof_pointer_memaccess_warning (sizeof_arg_loc,
					      expr.value, exprlist,
					      sizeof_arg,
					      sizeof_ptr_memacc_comptypes);
	  if (TREE_CODE (expr.value) == FUNCTION_DECL)
	    {
	      /* Check memset calls for transposed/suspicious arguments
		 and abs-like builtins for argument-type mismatches.  */
	      if (fndecl_built_in_p (expr.value, BUILT_IN_MEMSET)
		  && vec_safe_length (exprlist) == 3)
		{
		  tree arg0 = (*exprlist)[0];
		  tree arg2 = (*exprlist)[2];
		  warn_for_memset (expr_loc, arg0, arg2, literal_zero_mask);
		}
	      if (warn_absolute_value
		  && fndecl_built_in_p (expr.value, BUILT_IN_NORMAL)
		  && vec_safe_length (exprlist) == 1)
		warn_for_abs (expr_loc, expr.value, (*exprlist)[0]);
	    }
	  start = expr.get_start ();
	  finish = parser->tokens_buf[0].get_finish ();
	  expr.value
	    = c_build_function_call_vec (expr_loc, arg_loc, expr.value,
					 exprlist, origtypes);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  /* A folded __builtin_constant_p call is still only "maybe"
	     constant for later constant-expression checking.  */
	  if (TREE_CODE (expr.value) == INTEGER_CST
	      && TREE_CODE (orig_expr.value) == FUNCTION_DECL
	      && fndecl_built_in_p (orig_expr.value, BUILT_IN_CONSTANT_P))
	    expr.original_code = C_MAYBE_CONST_EXPR;
	  expr.original_type = NULL;
	  if (exprlist)
	    {
	      release_tree_vector (exprlist);
	      release_tree_vector (origtypes);
	    }
	  arg_loc.release ();
	  break;
	case CPP_DOT:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = default_function_array_conversion (expr_loc, expr);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_token *comp_tok = c_parser_peek_token (parser);
	      ident = comp_tok->value;
	      comp_loc = comp_tok->location;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.set_error ();
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr.value = build_component_ref (op_loc, expr.value, ident,
					    comp_loc);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_DEREF:
	  /* Structure element reference.  */
	  c_parser_consume_token (parser);
	  expr = convert_lvalue_to_rvalue (expr_loc, expr, true, false);
	  if (c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_token *comp_tok = c_parser_peek_token (parser);
	      ident = comp_tok->value;
	      comp_loc = comp_tok->location;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier");
	      expr.set_error ();
	      expr.original_code = ERROR_MARK;
	      expr.original_type = NULL;
	      return expr;
	    }
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  /* a->b is built as (*a).b.  */
	  expr.value = build_component_ref (op_loc,
					    build_indirect_ref (op_loc,
								expr.value,
								RO_ARROW),
					    ident, comp_loc);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  if (TREE_CODE (expr.value) != COMPONENT_REF)
	    expr.original_type = NULL;
	  else
	    {
	      /* Remember the original type of a bitfield.  */
	      tree field = TREE_OPERAND (expr.value, 1);
	      if (TREE_CODE (field) != FIELD_DECL)
		expr.original_type = NULL;
	      else
		expr.original_type = DECL_BIT_FIELD_TYPE (field);
	    }
	  break;
	case CPP_PLUS_PLUS:
	  /* Postincrement.  */
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc, POSTINCREMENT_EXPR,
				       expr.value, false);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	case CPP_MINUS_MINUS:
	  /* Postdecrement.  */
	  start = expr.get_start ();
	  finish = c_parser_peek_token (parser)->get_finish ();
	  c_parser_consume_token (parser);
	  expr = default_function_array_read_conversion (expr_loc, expr);
	  expr.value = build_unary_op (op_loc, POSTDECREMENT_EXPR,
				       expr.value, false);
	  set_c_expr_source_range (&expr, start, finish);
	  expr.original_code = ERROR_MARK;
	  expr.original_type = NULL;
	  break;
	default:
	  /* No further postfix operator follows.  */
	  return expr;
	}
    }
}
/* Parse an expression (C90 6.3.17, C99 6.5.17, C11 6.5.17).
expression:
assignment-expression
expression , assignment-expression
*/
static struct c_expr
c_parser_expression (c_parser *parser)
{
  location_t tloc = c_parser_peek_token (parser)->location;
  struct c_expr expr;
  expr = c_parser_expr_no_commas (parser, NULL);
  /* Only convert the first operand if a comma follows: the left-hand
     side of a comma operator is an rvalue whose value is discarded.  */
  if (c_parser_next_token_is (parser, CPP_COMMA))
    expr = convert_lvalue_to_rvalue (tloc, expr, true, false);
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      struct c_expr next;
      tree lhsval;
      location_t loc = c_parser_peek_token (parser)->location;
      location_t expr_loc;
      c_parser_consume_token (parser);
      expr_loc = c_parser_peek_token (parser)->location;
      lhsval = expr.value;
      /* Look through nested comma operators to the rightmost operand,
	 and mark decls/component refs there as read.  */
      while (TREE_CODE (lhsval) == COMPOUND_EXPR)
	lhsval = TREE_OPERAND (lhsval, 1);
      if (DECL_P (lhsval) || handled_component_p (lhsval))
	mark_exp_read (lhsval);
      next = c_parser_expr_no_commas (parser, NULL);
      next = convert_lvalue_to_rvalue (expr_loc, next, true, false);
      expr.value = build_compound_expr (loc, expr.value, next.value);
      expr.original_code = COMPOUND_EXPR;
      /* The comma expression's type information comes from its last
	 operand.  */
      expr.original_type = next.original_type;
    }
  return expr;
}
/* Parse an expression and convert functions or arrays to pointers and
lvalues to rvalues. */
static struct c_expr
c_parser_expression_conv (c_parser *parser)
{
  /* Record the location first: parsing consumes the tokens.  */
  const location_t expr_loc = c_parser_peek_token (parser)->location;
  struct c_expr result = c_parser_expression (parser);
  result = convert_lvalue_to_rvalue (expr_loc, result, true, false);
  return result;
}
/* Helper function of c_parser_expr_list. Check if IDXth (0 based)
argument is a literal zero alone and if so, set it in literal_zero_mask. */
static inline void
c_parser_check_literal_zero (c_parser *parser, unsigned *literal_zero_mask,
			     unsigned int idx)
{
  /* The mask only has room for HOST_BITS_PER_INT arguments.  */
  if (idx >= HOST_BITS_PER_INT)
    return;
  c_token *tok = c_parser_peek_token (parser);
  /* Only numeric and character-constant tokens can be a literal zero.  */
  if (tok->type != CPP_NUMBER
      && tok->type != CPP_CHAR
      && tok->type != CPP_WCHAR
      && tok->type != CPP_CHAR16
      && tok->type != CPP_CHAR32
      && tok->type != CPP_UTF8CHAR)
    return;
  /* If a parameter is literal zero alone (i.e. immediately followed by
     ',' or ')'), remember it for -Wmemset-transposed-args warning.  */
  c_token *next_tok = c_parser_peek_2nd_token (parser);
  if (integer_zerop (tok->value)
      && !TREE_OVERFLOW (tok->value)
      && (next_tok->type == CPP_COMMA
	  || next_tok->type == CPP_CLOSE_PAREN))
    *literal_zero_mask |= 1U << idx;
}
/* Parse a non-empty list of expressions. If CONVERT_P, convert
functions and arrays to pointers and lvalues to rvalues. If
FOLD_P, fold the expressions. If LOCATIONS is non-NULL, save the
locations of function arguments into this vector.
nonempty-expr-list:
assignment-expression
nonempty-expr-list , assignment-expression
*/
static vec<tree, va_gc> *
c_parser_expr_list (c_parser *parser, bool convert_p, bool fold_p,
		    vec<tree, va_gc> **p_orig_types,
		    location_t *sizeof_arg_loc, tree *sizeof_arg,
		    vec<location_t> *locations,
		    unsigned int *literal_zero_mask)
{
  vec<tree, va_gc> *ret;
  vec<tree, va_gc> *orig_types;
  struct c_expr expr;
  unsigned int idx = 0;
  ret = make_tree_vector ();
  if (p_orig_types == NULL)
    orig_types = NULL;
  else
    orig_types = make_tree_vector ();
  /* Handle the first expression outside the loop, then each
     comma-separated follower inside it.  */
  if (literal_zero_mask)
    c_parser_check_literal_zero (parser, literal_zero_mask, 0);
  expr = c_parser_expr_no_commas (parser, NULL);
  if (convert_p)
    expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true, true);
  if (fold_p)
    expr.value = c_fully_fold (expr.value, false, NULL);
  ret->quick_push (expr.value);
  if (orig_types)
    orig_types->quick_push (expr.original_type);
  if (locations)
    locations->safe_push (expr.get_location ());
  /* Record sizeof arguments (up to three) for
     -Wsizeof-pointer-memaccess; c_last_sizeof_* were set when the
     sizeof expression was parsed.  */
  if (sizeof_arg != NULL
      && expr.original_code == SIZEOF_EXPR)
    {
      sizeof_arg[0] = c_last_sizeof_arg;
      sizeof_arg_loc[0] = c_last_sizeof_loc;
    }
  while (c_parser_next_token_is (parser, CPP_COMMA))
    {
      c_parser_consume_token (parser);
      if (literal_zero_mask)
	c_parser_check_literal_zero (parser, literal_zero_mask, idx + 1);
      expr = c_parser_expr_no_commas (parser, NULL);
      if (convert_p)
	expr = convert_lvalue_to_rvalue (expr.get_location (), expr, true,
					 true);
      if (fold_p)
	expr.value = c_fully_fold (expr.value, false, NULL);
      vec_safe_push (ret, expr.value);
      if (orig_types)
	vec_safe_push (orig_types, expr.original_type);
      if (locations)
	locations->safe_push (expr.get_location ());
      if (++idx < 3
	  && sizeof_arg != NULL
	  && expr.original_code == SIZEOF_EXPR)
	{
	  sizeof_arg[idx] = c_last_sizeof_arg;
	  sizeof_arg_loc[idx] = c_last_sizeof_loc;
	}
    }
  if (orig_types)
    *p_orig_types = orig_types;
  return ret;
}
/* Parse Objective-C-specific constructs. */
/* Parse an objc-class-definition.
objc-class-definition:
@interface identifier objc-superclass[opt] objc-protocol-refs[opt]
objc-class-instance-variables[opt] objc-methodprotolist @end
@implementation identifier objc-superclass[opt]
objc-class-instance-variables[opt]
@interface identifier ( identifier ) objc-protocol-refs[opt]
objc-methodprotolist @end
@interface identifier ( ) objc-protocol-refs[opt]
objc-methodprotolist @end
@implementation identifier ( identifier )
objc-superclass:
: identifier
"@interface identifier (" must start "@interface identifier (
identifier ) ...": objc-methodprotolist in the first production may
not start with a parenthesized identifier as a declarator of a data
definition with no declaration specifiers if the objc-superclass,
objc-protocol-refs and objc-class-instance-variables are omitted. */
static void
c_parser_objc_class_definition (c_parser *parser, tree attributes)
{
  bool iface_p;
  tree id1;
  tree superclass;
  /* Distinguish @interface from @implementation; nothing else may
     reach this function.  */
  if (c_parser_next_token_is_keyword (parser, RID_AT_INTERFACE))
    iface_p = true;
  else if (c_parser_next_token_is_keyword (parser, RID_AT_IMPLEMENTATION))
    iface_p = false;
  else
    gcc_unreachable ();

  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  id1 = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* We have a category or class extension.  */
      tree id2;
      tree proto = NULL_TREE;
      matching_parens parens;
      parens.consume_open (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  /* "@interface Name ()" (empty parens) is a class extension;
	     only valid in the @interface form.  */
	  if (iface_p && c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    {
	      /* We have a class extension.  */
	      id2 = NULL_TREE;
	    }
	  else
	    {
	      c_parser_error (parser, "expected identifier or %<)%>");
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	      return;
	    }
	}
      else
	{
	  id2 = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      parens.skip_until_found_close (parser);
      if (!iface_p)
	{
	  /* Category @implementation has no protocol refs or method
	     prototype list of its own.  */
	  objc_start_category_implementation (id1, id2);
	  return;
	}
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_category_interface (id1, id2, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
      return;
    }
  /* Plain class @interface/@implementation: optional ": superclass".  */
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  return;
	}
      superclass = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  else
    superclass = NULL_TREE;
  if (iface_p)
    {
      tree proto = NULL_TREE;
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      objc_start_class_interface (id1, superclass, proto, attributes);
    }
  else
    objc_start_class_implementation (id1, superclass);
  if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    c_parser_objc_class_instance_variables (parser);
  if (iface_p)
    {
      objc_continue_interface ();
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      objc_finish_interface ();
    }
  else
    {
      /* The body of an @implementation continues to be parsed by the
	 caller; @end is not consumed here.  */
      objc_continue_implementation ();
      return;
    }
}
/* Parse objc-class-instance-variables.
objc-class-instance-variables:
{ objc-instance-variable-decl-list[opt] }
objc-instance-variable-decl-list:
objc-visibility-spec
objc-instance-variable-decl ;
;
objc-instance-variable-decl-list objc-visibility-spec
objc-instance-variable-decl-list objc-instance-variable-decl ;
objc-instance-variable-decl-list ;
objc-visibility-spec:
@private
@protected
@public
objc-instance-variable-decl:
struct-declaration
*/
static void
c_parser_objc_class_instance_variables (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  c_parser_consume_token (parser);
  /* Loop until the closing brace (or EOF, to avoid hanging on
     malformed input).  */
  while (c_parser_next_token_is_not (parser, CPP_EOF))
    {
      tree decls;
      /* Parse any stray semicolon.  */
      if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	{
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "extra semicolon");
	  c_parser_consume_token (parser);
	  continue;
	}
      /* Stop if at the end of the instance variables.  */
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  break;
	}
      /* Parse any objc-visibility-spec.  */
      if (c_parser_next_token_is_keyword (parser, RID_AT_PRIVATE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PRIVATE);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PROTECTED))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PROTECTED);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PUBLIC))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PUBLIC);
	  continue;
	}
      else if (c_parser_next_token_is_keyword (parser, RID_AT_PACKAGE))
	{
	  c_parser_consume_token (parser);
	  objc_set_visibility (OBJC_IVAR_VIS_PACKAGE);
	  continue;
	}
      else if (c_parser_next_token_is (parser, CPP_PRAGMA))
	{
	  c_parser_pragma (parser, pragma_external, NULL);
	  continue;
	}
      /* Parse some comma-separated declarations.  */
      decls = c_parser_struct_declaration (parser);
      if (decls == NULL)
	{
	  /* There is a syntax error.  We want to skip the offending
	     tokens up to the next ';' (included) or '}'
	     (excluded).  */
	  /* First, skip manually a ')' or ']'.  This is because they
	     reduce the nesting level, so c_parser_skip_until_found()
	     wouldn't be able to skip past them.  */
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE)
	    c_parser_consume_token (parser);
	  /* Then, do the standard skipping.  */
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* We hopefully recovered.  Start normal parsing again.  */
	  parser->error = false;
	  continue;
	}
      else
	{
	  /* Comma-separated instance variables are chained together
	     in reverse order; add them one by one.  */
	  tree ivar = nreverse (decls);
	  for (; ivar; ivar = DECL_CHAIN (ivar))
	    objc_add_instance_variable (copy_node (ivar));
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
}
/* Parse an objc-class-declaration.
objc-class-declaration:
@class identifier-list ;
*/
static void
c_parser_objc_class_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_CLASS));
  c_parser_consume_token (parser);
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  for (;;)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  parser->error = false;
	  return;
	}
      tree class_id = c_parser_peek_token (parser)->value;
      objc_declare_class (class_id);
      c_parser_consume_token (parser);
      /* A comma means another class name follows; anything else ends
	 the list.  */
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* Parse an objc-alias-declaration.
objc-alias-declaration:
@compatibility_alias identifier identifier ;
*/
static void
c_parser_objc_alias_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_ALIAS));
  c_parser_consume_token (parser);
  /* Parse the two identifiers; on a missing one, report the error and
     skip to the terminating semicolon.  */
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  tree alias_id = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      return;
    }
  tree orig_id = c_parser_peek_token (parser)->value;
  c_parser_consume_token (parser);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_declare_alias (alias_id, orig_id);
}
/* Parse an objc-protocol-definition.
objc-protocol-definition:
@protocol identifier objc-protocol-refs[opt] objc-methodprotolist @end
@protocol identifier-list ;
"@protocol identifier ;" should be resolved as "@protocol
identifier-list ;": objc-methodprotolist may not start with a
semicolon in the first alternative if objc-protocol-refs are
omitted. */
static void
c_parser_objc_protocol_definition (c_parser *parser, tree attributes)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROTOCOL));
  c_parser_consume_token (parser);
  if (c_parser_next_token_is_not (parser, CPP_NAME))
    {
      c_parser_error (parser, "expected identifier");
      return;
    }
  /* Disambiguate by the token after the identifier: ',' or ';' means a
     forward protocol declaration list; anything else starts a full
     protocol definition (see the grammar comment above).  */
  if (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
      || c_parser_peek_2nd_token (parser)->type == CPP_SEMICOLON)
    {
      /* Any identifiers, including those declared as type names, are
	 OK here.  */
      while (true)
	{
	  tree id;
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      break;
	    }
	  id = c_parser_peek_token (parser)->value;
	  objc_declare_protocol (id, attributes);
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
    }
  else
    {
      tree id = c_parser_peek_token (parser)->value;
      tree proto = NULL_TREE;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_LESS))
	proto = c_parser_objc_protocol_refs (parser);
      /* Method prototypes inside the @protocol body may use the
	 protocol qualifier keywords (in, out, ...).  */
      parser->objc_pq_context = true;
      objc_start_protocol (id, proto, attributes);
      c_parser_objc_methodprotolist (parser);
      c_parser_require_keyword (parser, RID_AT_END, "expected %<@end%>");
      parser->objc_pq_context = false;
      objc_finish_interface ();
    }
}
/* Parse an objc-method-type.
objc-method-type:
+
-
Return true if it is a class method (+) and false if it is
an instance method (-).
*/
static inline bool
c_parser_objc_method_type (c_parser *parser)
{
  /* Record which sign we saw before consuming it; '+' means a class
     method, '-' an instance method.  No other token may reach here.  */
  const bool is_class_method = c_parser_next_token_is (parser, CPP_PLUS);
  if (!is_class_method && !c_parser_next_token_is (parser, CPP_MINUS))
    gcc_unreachable ();
  c_parser_consume_token (parser);
  return is_class_method;
}
/* Parse an objc-method-definition.
objc-method-definition:
objc-method-type objc-method-decl ;[opt] compound-statement
*/
static void
c_parser_objc_method_definition (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE, expr = NULL_TREE;
  /* The method declaration may use protocol qualifier keywords.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    &expr);
  if (decl == error_mark_node)
    return;  /* Bail here. */
  /* An optional (but pedantically diagnosed) ';' may separate the
     declaration from the body.  */
  if (c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_consume_token (parser);
      pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
	       "extra semicolon in method definition specified");
    }
  if (!c_parser_next_token_is (parser, CPP_OPEN_BRACE))
    {
      c_parser_error (parser, "expected %<{%>");
      return;
    }
  parser->objc_pq_context = false;
  if (objc_start_method_definition (is_class_method, decl, attributes, expr))
    {
      add_stmt (c_parser_compound_statement (parser));
      objc_finish_method_definition (current_function_decl);
    }
  else
    {
      /* This code is executed when we find a method definition
	 outside of an @implementation context (or invalid for other
	 reasons).  Parse the method (to keep going) but do not emit
	 any code.  */
      c_parser_compound_statement (parser);
    }
}
/* Parse an objc-methodprotolist.
objc-methodprotolist:
empty
objc-methodprotolist objc-methodproto
objc-methodprotolist declaration
objc-methodprotolist ;
@optional
@required
The declaration is a data definition, which may be missing
declaration specifiers under the same rules and diagnostics as
other data definitions outside functions, and the stray semicolon
is diagnosed the same way as a stray semicolon outside a
function. */
static void
c_parser_objc_methodprotolist (c_parser *parser)
{
  while (true)
    {
      /* The list is terminated by @end.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_SEMICOLON:
	  /* Stray ';' outside a function: pedantic warning, then keep
	     going.  */
	  pedwarn (c_parser_peek_token (parser)->location, OPT_Wpedantic,
		   "ISO C does not allow extra %<;%> outside of a function");
	  c_parser_consume_token (parser);
	  break;
	case CPP_PLUS:
	case CPP_MINUS:
	  /* '+' or '-' starts a method prototype.  */
	  c_parser_objc_methodproto (parser);
	  break;
	case CPP_PRAGMA:
	  c_parser_pragma (parser, pragma_external, NULL);
	  break;
	case CPP_EOF:
	  /* Missing @end; the caller will diagnose it.  */
	  return;
	default:
	  if (c_parser_next_token_is_keyword (parser, RID_AT_END))
	    return;
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY))
	    c_parser_objc_at_property_declaration (parser);
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_OPTIONAL))
	    {
	      objc_set_method_opt (true);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is_keyword (parser, RID_AT_REQUIRED))
	    {
	      objc_set_method_opt (false);
	      c_parser_consume_token (parser);
	    }
	  else
	    /* Anything else is parsed as an ordinary data definition.  */
	    c_parser_declaration_or_fndef (parser, false, false, true,
					   false, true, NULL, vNULL);
	  break;
	}
    }
}
/* Parse an objc-methodproto.
objc-methodproto:
objc-method-type objc-method-decl ;
*/
static void
c_parser_objc_methodproto (c_parser *parser)
{
  bool is_class_method = c_parser_objc_method_type (parser);
  tree decl, attributes = NULL_TREE;
  /* Remember protocol qualifiers in prototypes.  */
  parser->objc_pq_context = true;
  decl = c_parser_objc_method_decl (parser, is_class_method, &attributes,
				    NULL);
  /* Forget protocol qualifiers now.  */
  parser->objc_pq_context = false;
  /* Do not allow the presence of attributes to hide an erroneous
     method implementation in the interface section.  */
  if (!c_parser_next_token_is (parser, CPP_SEMICOLON))
    {
      c_parser_error (parser, "expected %<;%>");
      return;
    }
  if (decl != error_mark_node)
    objc_add_method_declaration (is_class_method, decl, attributes);
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* If we are at a position that method attributes may be present, check that
there are not any parsed already (a syntax error) and then collect any
specified at the current location. Finally, if new attributes were present,
check that the next token is legal ( ';' for decls and '{' for defs). */
/* If we are at a position where method attributes may appear, diagnose
   any collected earlier (attributes belong only at the end), gather the
   ones written here, and check that what follows them is a legal
   terminator (';' for declarations, '{' for definitions).  Returns true
   on any syntax error.  */
static bool
c_parser_objc_maybe_method_attributes (c_parser* parser, tree* attributes)
{
  bool earlier_error = false;
  /* Attributes already seen before this point are misplaced.  */
  if (*attributes != NULL_TREE)
    {
      c_parser_error (parser,
		      "method attributes must be specified at the end only");
      *attributes = NULL_TREE;
      earlier_error = true;
    }
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    *attributes = c_parser_gnu_attributes (parser);
  /* Nothing collected here: just report any earlier error.  */
  if (*attributes == NULL_TREE || earlier_error)
    return earlier_error;
  /* Attributes followed by ';' or '{' are in the right place.  */
  bool properly_terminated
    = (c_parser_next_token_is (parser, CPP_SEMICOLON)
       || c_parser_next_token_is (parser, CPP_OPEN_BRACE));
  if (properly_terminated)
    return earlier_error;
  /* We've got attributes, but not at the end.  */
  c_parser_error (parser,
		  "expected %<;%> or %<{%> after method attribute definition");
  return true;
}
/* Parse an objc-method-decl.
objc-method-decl:
( objc-type-name ) objc-selector
objc-selector
( objc-type-name ) objc-keyword-selector objc-optparmlist
objc-keyword-selector objc-optparmlist
gnu-attributes
objc-keyword-selector:
objc-keyword-decl
objc-keyword-selector objc-keyword-decl
objc-keyword-decl:
objc-selector : ( objc-type-name ) identifier
objc-selector : identifier
: ( objc-type-name ) identifier
: identifier
objc-optparmlist:
objc-optparms objc-optellipsis
objc-optparms:
empty
objc-opt-parms , parameter-declaration
objc-optellipsis:
empty
, ...
*/
static tree
c_parser_objc_method_decl (c_parser *parser, bool is_class_method,
			   tree *attributes, tree *expr)
{
  tree type = NULL_TREE;
  tree sel;
  tree parms = NULL_TREE;
  bool ellipsis = false;
  bool attr_err = false;
  *attributes = NULL_TREE;
  /* Optional parenthesized return type: "( objc-type-name )".  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      parens.consume_open (parser);
      type = c_parser_objc_type_name (parser);
      parens.skip_until_found_close (parser);
    }
  sel = c_parser_objc_selector (parser);
  /* If there is no selector, or a colon follows, we have an
     objc-keyword-selector.  If there is a selector, and a colon does
     not follow, that selector ends the objc-method-decl.  */
  if (!sel || c_parser_next_token_is (parser, CPP_COLON))
    {
      tree tsel = sel;
      tree list = NULL_TREE;
      /* Parse each "selector : (type) identifier" keyword fragment.  */
      while (true)
	{
	  tree atype = NULL_TREE, id, keyworddecl;
	  tree param_attr = NULL_TREE;
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    break;
	  /* Optional parenthesized argument type.  */
	  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	    {
	      c_parser_consume_token (parser);
	      atype = c_parser_objc_type_name (parser);
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	    }
	  /* New ObjC allows attributes on method parameters.  */
	  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
	    param_attr = c_parser_gnu_attributes (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      return error_mark_node;
	    }
	  id = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  keyworddecl = objc_build_keyword_decl (tsel, atype, id, param_attr);
	  list = chainon (list, keyworddecl);
	  /* Another selector (or a bare ':') continues the keyword
	     list; anything else ends it.  */
	  tsel = c_parser_objc_selector (parser);
	  if (!tsel && c_parser_next_token_is_not (parser, CPP_COLON))
	    break;
	}
      attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
      /* Parse the optional parameter list.  Optional Objective-C
	 method parameters follow the C syntax, and may include '...'
	 to denote a variable number of arguments.  */
      parms = make_node (TREE_LIST);
      while (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  struct c_parm *parm;
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	    {
	      ellipsis = true;
	      c_parser_consume_token (parser);
	      /* Attributes may also follow the ellipsis.  */
	      attr_err |= c_parser_objc_maybe_method_attributes
		(parser, attributes) ;
	      break;
	    }
	  parm = c_parser_parameter_declaration (parser, NULL_TREE, false);
	  if (parm == NULL)
	    break;
	  parms = chainon (parms,
			   build_tree_list (NULL_TREE, grokparm (parm, expr)));
	}
      sel = list;
    }
  else
    /* Unary selector (no arguments); attributes may follow it.  */
    attr_err |= c_parser_objc_maybe_method_attributes (parser, attributes) ;
  if (sel == NULL)
    {
      c_parser_error (parser, "objective-c method declaration is expected");
      return error_mark_node;
    }
  if (attr_err)
    return error_mark_node;
  return objc_build_method_signature (is_class_method, type, sel, parms, ellipsis);
}
/* Parse an objc-type-name.
objc-type-name:
objc-type-qualifiers[opt] type-name
objc-type-qualifiers[opt]
objc-type-qualifiers:
objc-type-qualifier
objc-type-qualifiers objc-type-qualifier
objc-type-qualifier: one of
in out inout bycopy byref oneway
*/
/* Parse an objc-type-name: any number of leading Objective-C type
   qualifiers (in/out/inout/bycopy/byref/oneway) followed by an optional
   C type-name.  Returns a TREE_LIST whose purpose is the qualifier
   chain and whose value is the type (NULL_TREE when absent or on
   error recovery, which yields the default 'id' type downstream).  */
static tree
c_parser_objc_type_name (c_parser *parser)
{
  tree quals = NULL_TREE;
  struct c_type_name *type_name = NULL;
  tree type = NULL_TREE;
  /* Accumulate the qualifier keywords, most recent first.  */
  for (;;)
    {
      c_token *tok = c_parser_peek_token (parser);
      bool is_qualifier = false;
      if (tok->type == CPP_KEYWORD)
	switch (tok->keyword)
	  {
	  case RID_IN:
	  case RID_OUT:
	  case RID_INOUT:
	  case RID_BYCOPY:
	  case RID_BYREF:
	  case RID_ONEWAY:
	    is_qualifier = true;
	    break;
	  default:
	    break;
	  }
      if (!is_qualifier)
	break;
      quals = chainon (build_tree_list (NULL_TREE, tok->value), quals);
      c_parser_consume_token (parser);
    }
  if (c_parser_next_tokens_start_typename (parser, cla_prefer_type))
    type_name = c_parser_type_name (parser);
  if (type_name)
    type = groktypename (type_name, NULL, NULL);
  /* If the type is erroneous, a diagnostic was already emitted; drop
     to NULL_TREE so the default type ('id') is used for recovery.  */
  if (type == error_mark_node)
    type = NULL_TREE;
  return build_tree_list (quals, type);
}
/* Parse objc-protocol-refs.
objc-protocol-refs:
< identifier-list >
*/
/* Parse objc-protocol-refs: "< identifier-list >".  Returns the
   identifiers chained as a TREE_LIST.  */
static tree
c_parser_objc_protocol_refs (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is (parser, CPP_LESS));
  c_parser_consume_token (parser);
  tree refs = NULL_TREE;
  /* Any identifiers, including those declared as type names, are OK
     here.  */
  bool more = true;
  while (more)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      tree name = c_parser_peek_token (parser)->value;
      refs = chainon (refs, build_tree_list (NULL_TREE, name));
      c_parser_consume_token (parser);
      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_require (parser, CPP_GREATER, "expected %<>%>");
  return refs;
}
/* Parse an objc-try-catch-finally-statement.
objc-try-catch-finally-statement:
@try compound-statement objc-catch-list[opt]
@try compound-statement objc-catch-list[opt] @finally compound-statement
objc-catch-list:
@catch ( objc-catch-parameter-declaration ) compound-statement
objc-catch-list @catch ( objc-catch-parameter-declaration ) compound-statement
objc-catch-parameter-declaration:
parameter-declaration
'...'
where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS.
PS: This function is identical to cp_parser_objc_try_catch_finally_statement
for C++. Keep them in sync. */
static void
c_parser_objc_try_catch_finally_statement (c_parser *parser)
{
  location_t location;
  tree stmt;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_TRY));
  c_parser_consume_token (parser);
  location = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (location);
  stmt = c_parser_compound_statement (parser);
  objc_begin_try_stmt (location, stmt);
  while (c_parser_next_token_is_keyword (parser, RID_AT_CATCH))
    {
      struct c_parm *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;
      c_parser_consume_token (parser);
      matching_parens parens;
      /* require_open returns true exactly when the '(' was found and
	 consumed; record that so the recovery code below knows whether
	 to insist on a matching ')'.  (The condition was previously
	 negated, which inverted the recovery logic.)  */
      if (parens.require_open (parser))
	seen_open_paren = true;
      if (c_parser_next_token_is (parser, CPP_ELLIPSIS))
	{
	  /* We have "@catch (...)" (where the '...' are literally
	     what is in the code).  Skip the '...'.
	     parameter_declaration is set to NULL_TREE, and
	     objc_being_catch_clauses() knows that that means
	     '...'.  */
	  c_parser_consume_token (parser);
	  parameter_declaration = NULL_TREE;
	}
      else
	{
	  /* We have "@catch (NSException *exception)" or something
	     like that.  Parse the parameter declaration.  */
	  parm = c_parser_parameter_declaration (parser, NULL_TREE, false);
	  if (parm == NULL)
	    parameter_declaration = error_mark_node;
	  else
	    parameter_declaration = grokparm (parm, NULL);
	}
      if (seen_open_paren)
	parens.require_close (parser);
      else
	{
	  /* If there was no open parenthesis, we are recovering from
	     an error, and we are trying to figure out what mistake
	     the user has made.  */
	  /* If there is an immediate closing parenthesis, the user
	     probably forgot the opening one (ie, they typed "@catch
	     NSException *e)".  Parse the closing parenthesis and keep
	     going.  */
	  if (c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	    c_parser_consume_token (parser);
	  /* If there is no immediate closing parenthesis, the user
	     probably doesn't know that parenthesis are required at
	     all (ie, they typed "@catch NSException *e").  So, just
	     forget about the closing parenthesis and keep going.  */
	}
      objc_begin_catch_clause (parameter_declaration);
      if (c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
	c_parser_compound_statement_nostart (parser);
      objc_finish_catch_clause ();
    }
  if (c_parser_next_token_is_keyword (parser, RID_AT_FINALLY))
    {
      c_parser_consume_token (parser);
      location = c_parser_peek_token (parser)->location;
      stmt = c_parser_compound_statement (parser);
      objc_build_finally_clause (location, stmt);
    }
  objc_finish_try_stmt ();
}
/* Parse an objc-synchronized-statement.
objc-synchronized-statement:
@synchronized ( expression ) compound-statement
*/
/* Parse an objc-synchronized-statement:
   "@synchronized ( expression ) compound-statement".  */
static void
c_parser_objc_synchronized_statement (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNCHRONIZED));
  c_parser_consume_token (parser);
  location_t loc = c_parser_peek_token (parser)->location;
  objc_maybe_warn_exceptions (loc);
  tree expr;
  matching_parens parens;
  if (!parens.require_open (parser))
    /* No '(' — recover with an erroneous guard expression.  */
    expr = error_mark_node;
  else
    {
      struct c_expr ce = c_parser_expression (parser);
      ce = convert_lvalue_to_rvalue (loc, ce, false, false);
      expr = c_fully_fold (ce.value, false, NULL);
      parens.skip_until_found_close (parser);
    }
  tree stmt = c_parser_compound_statement (parser);
  objc_build_synchronized (loc, expr, stmt);
}
/* Parse an objc-selector; return NULL_TREE without an error if the
next token is not an objc-selector.
objc-selector:
identifier
one of
enum struct union if else while do for switch case default
break continue return goto asm sizeof typeof __alignof
unsigned long const short volatile signed restrict _Complex
in out inout bycopy byref oneway int char float double void _Bool
_Atomic
??? Why this selection of keywords but not, for example, storage
class specifiers? */
static tree
c_parser_objc_selector (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  tree value = token->value;
  /* A plain identifier is always a valid selector.  */
  if (token->type == CPP_NAME)
    {
      c_parser_consume_token (parser);
      return value;
    }
  if (token->type != CPP_KEYWORD)
    return NULL_TREE;
  /* A selected set of keywords may also be used as selector names;
     anything not in this list makes us return NULL_TREE without
     consuming the token.  */
  switch (token->keyword)
    {
    case RID_ENUM:
    case RID_STRUCT:
    case RID_UNION:
    case RID_IF:
    case RID_ELSE:
    case RID_WHILE:
    case RID_DO:
    case RID_FOR:
    case RID_SWITCH:
    case RID_CASE:
    case RID_DEFAULT:
    case RID_BREAK:
    case RID_CONTINUE:
    case RID_RETURN:
    case RID_GOTO:
    case RID_ASM:
    case RID_SIZEOF:
    case RID_TYPEOF:
    case RID_ALIGNOF:
    case RID_UNSIGNED:
    case RID_LONG:
    case RID_CONST:
    case RID_SHORT:
    case RID_VOLATILE:
    case RID_SIGNED:
    case RID_RESTRICT:
    case RID_COMPLEX:
    case RID_IN:
    case RID_OUT:
    case RID_INOUT:
    case RID_BYCOPY:
    case RID_BYREF:
    case RID_ONEWAY:
    case RID_INT:
    case RID_CHAR:
    case RID_FLOAT:
    case RID_DOUBLE:
    CASE_RID_FLOATN_NX:
    case RID_VOID:
    case RID_BOOL:
    case RID_ATOMIC:
    case RID_AUTO_TYPE:
    case RID_INT_N_0:
    case RID_INT_N_1:
    case RID_INT_N_2:
    case RID_INT_N_3:
      c_parser_consume_token (parser);
      return value;
    default:
      return NULL_TREE;
    }
}
/* Parse an objc-selector-arg.
objc-selector-arg:
objc-selector
objc-keywordname-list
objc-keywordname-list:
objc-keywordname
objc-keywordname-list objc-keywordname
objc-keywordname:
objc-selector :
:
*/
static tree
c_parser_objc_selector_arg (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  tree list = NULL_TREE;
  /* A lone selector with no following ':' (or '::') is the whole
     argument.  */
  if (sel
      && c_parser_next_token_is_not (parser, CPP_COLON)
      && c_parser_next_token_is_not (parser, CPP_SCOPE))
    return sel;
  while (true)
    {
      if (c_parser_next_token_is (parser, CPP_SCOPE))
	{
	  /* The preprocessor lexed '::' as one CPP_SCOPE token; treat
	     it as two consecutive ':' entries in the keyword list.  */
	  c_parser_consume_token (parser);
	  list = chainon (list, build_tree_list (sel, NULL_TREE));
	  list = chainon (list, build_tree_list (NULL_TREE, NULL_TREE));
	}
      else
	{
	  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	    return list;
	  list = chainon (list, build_tree_list (sel, NULL_TREE));
	}
      sel = c_parser_objc_selector (parser);
      if (!sel
	  && c_parser_next_token_is_not (parser, CPP_COLON)
	  && c_parser_next_token_is_not (parser, CPP_SCOPE))
	break;
    }
  return list;
}
/* Parse an objc-receiver.
objc-receiver:
expression
class-name
type-name
*/
/* Parse an objc-receiver: either a class/type name (yielding a class
   reference) or a general expression.  */
static tree
c_parser_objc_receiver (c_parser *parser)
{
  c_token *token = c_parser_peek_token (parser);
  location_t loc = token->location;
  bool names_a_class
    = (token->type == CPP_NAME
       && (token->id_kind == C_ID_TYPENAME
	   || token->id_kind == C_ID_CLASSNAME));
  if (names_a_class)
    {
      tree name = token->value;
      c_parser_consume_token (parser);
      return objc_get_class_reference (name);
    }
  /* Otherwise the receiver is an ordinary expression.  */
  struct c_expr ce = c_parser_expression (parser);
  ce = convert_lvalue_to_rvalue (loc, ce, false, false);
  return c_fully_fold (ce.value, false, NULL);
}
/* Parse objc-message-args.
objc-message-args:
objc-selector
objc-keywordarg-list
objc-keywordarg-list:
objc-keywordarg
objc-keywordarg-list objc-keywordarg
objc-keywordarg:
objc-selector : objc-keywordexpr
: objc-keywordexpr
*/
/* Parse objc-message-args: a lone selector, or a chain of
   "selector : keywordexpr" pairs.  Returns error_mark_node when a
   required ':' is missing.  */
static tree
c_parser_objc_message_args (c_parser *parser)
{
  tree sel = c_parser_objc_selector (parser);
  /* A selector not followed by ':' is the entire message.  */
  if (sel != NULL_TREE && c_parser_next_token_is_not (parser, CPP_COLON))
    return sel;
  tree args = NULL_TREE;
  do
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	return error_mark_node;
      tree expr = c_parser_objc_keywordexpr (parser);
      args = chainon (args, build_tree_list (sel, expr));
      sel = c_parser_objc_selector (parser);
    }
  while (sel != NULL_TREE || c_parser_next_token_is (parser, CPP_COLON));
  return args;
}
/* Parse an objc-keywordexpr.
objc-keywordexpr:
nonempty-expr-list
*/
/* Parse an objc-keywordexpr: a nonempty-expr-list.  */
static tree
c_parser_objc_keywordexpr (c_parser *parser)
{
  vec<tree, va_gc> *expr_list = c_parser_expr_list (parser, true, true,
						    NULL, NULL, NULL, NULL);
  /* A single expression is returned directly (dropping one level of
     indirection); a comma-separated list is kept as a TREE_LIST to be
     collapsed later.  */
  tree ret = (vec_safe_length (expr_list) == 1
	      ? (*expr_list)[0]
	      : build_tree_list_vec (expr_list));
  release_tree_vector (expr_list);
  return ret;
}
/* A check, needed in several places, that ObjC interface, implementation or
method definitions are not prefixed by incorrect items. */
/* A check, needed in several places, that ObjC interface,
   implementation or method definitions are not prefixed by incorrect
   items.  Returns true (after diagnosing and skipping) when a bad
   prefix was present.  */
static bool
c_parser_objc_diagnose_bad_element_prefix (c_parser *parser,
					   struct c_declspecs *specs)
{
  bool bad_prefix = (!specs->declspecs_seen_p
		     || specs->non_sc_seen_p
		     || specs->typespec_kind != ctsk_none);
  if (!bad_prefix)
    return false;
  c_parser_error (parser,
		  "no type or storage class may be specified here,");
  c_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}
/* Parse an Objective-C @property declaration. The syntax is:
objc-property-declaration:
'@property' objc-property-attributes[opt] struct-declaration ;
objc-property-attributes:
'(' objc-property-attribute-list ')'
objc-property-attribute-list:
objc-property-attribute
objc-property-attribute-list, objc-property-attribute
objc-property-attribute
'getter' = identifier
'setter' = identifier
'readonly'
'readwrite'
'assign'
'retain'
'copy'
'nonatomic'
For example:
@property NSString *name;
@property (readonly) id object;
@property (retain, nonatomic, getter=getTheName) id name;
@property int a, b, c;
PS: This function is identical to cp_parser_objc_at_propery_declaration
for C++. Keep them in sync. */
static void
c_parser_objc_at_property_declaration (c_parser *parser)
{
  /* The following variables hold the attributes of the properties as
     parsed.  They are 'false' or 'NULL_TREE' if the attribute was not
     seen.  When we see an attribute, we set them to 'true' (if they
     are boolean properties) or to the identifier (if they have an
     argument, ie, for getter and setter).  Note that here we only
     parse the list of attributes, check the syntax and accumulate the
     attributes that we find.  objc_add_property_declaration() will
     then process the information.  */
  bool property_assign = false;
  bool property_copy = false;
  tree property_getter_ident = NULL_TREE;
  bool property_nonatomic = false;
  bool property_readonly = false;
  bool property_readwrite = false;
  bool property_retain = false;
  tree property_setter_ident = NULL_TREE;
  /* 'properties' is the list of properties that we read.  Usually a
     single one, but maybe more (eg, in "@property int a, b, c;" there
     are three).  */
  tree properties;
  location_t loc;
  loc = c_parser_peek_token (parser)->location;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_PROPERTY));
  c_parser_consume_token (parser);  /* Eat '@property'.  */
  /* Parse the optional attribute list...  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      /* Eat the '('.  */
      parens.consume_open (parser);
      /* Property attribute keywords are valid now.  */
      parser->objc_property_attr_context = true;
      while (true)
	{
	  bool syntax_error = false;
	  c_token *token = c_parser_peek_token (parser);
	  enum rid keyword;
	  /* Every attribute must be a keyword (the attr context above
	     makes the attribute names lex as keywords).  */
	  if (token->type != CPP_KEYWORD)
	    {
	      if (token->type == CPP_CLOSE_PAREN)
		c_parser_error (parser, "expected identifier");
	      else
		{
		  c_parser_consume_token (parser);
		  c_parser_error (parser, "unknown property attribute");
		}
	      break;
	    }
	  keyword = token->keyword;
	  c_parser_consume_token (parser);
	  switch (keyword)
	    {
	    case RID_ASSIGN: property_assign = true; break;
	    case RID_COPY: property_copy = true; break;
	    case RID_NONATOMIC: property_nonatomic = true; break;
	    case RID_READONLY: property_readonly = true; break;
	    case RID_READWRITE: property_readwrite = true; break;
	    case RID_RETAIN: property_retain = true; break;
	    case RID_GETTER:
	    case RID_SETTER:
	      /* getter/setter take "= identifier" (setter additionally
		 requires a trailing ':').  */
	      if (c_parser_next_token_is_not (parser, CPP_EQ))
		{
		  if (keyword == RID_GETTER)
		    c_parser_error (parser,
				    "missing %<=%> (after %<getter%> attribute)");
		  else
		    c_parser_error (parser,
				    "missing %<=%> (after %<setter%> attribute)");
		  syntax_error = true;
		  break;
		}
	      c_parser_consume_token (parser); /* eat the = */
	      if (c_parser_next_token_is_not (parser, CPP_NAME))
		{
		  c_parser_error (parser, "expected identifier");
		  syntax_error = true;
		  break;
		}
	      if (keyword == RID_SETTER)
		{
		  if (property_setter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<setter%> attribute may only be specified once");
		  else
		    property_setter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  if (c_parser_next_token_is_not (parser, CPP_COLON))
		    c_parser_error (parser, "setter name must terminate with %<:%>");
		  else
		    c_parser_consume_token (parser);
		}
	      else
		{
		  if (property_getter_ident != NULL_TREE)
		    c_parser_error (parser, "the %<getter%> attribute may only be specified once");
		  else
		    property_getter_ident = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		}
	      break;
	    default:
	      c_parser_error (parser, "unknown property attribute");
	      syntax_error = true;
	      break;
	    }
	  if (syntax_error)
	    break;
	  /* Attributes are comma-separated.  */
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      parser->objc_property_attr_context = false;
      parens.skip_until_found_close (parser);
    }
  /* ... and the property declaration(s).  */
  properties = c_parser_struct_declaration (parser);
  if (properties == error_mark_node)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
      parser->error = false;
      return;
    }
  if (properties == NULL_TREE)
    c_parser_error (parser, "expected identifier");
  else
    {
      /* Comma-separated properties are chained together in
	 reverse order; add them one by one.  */
      properties = nreverse (properties);
      for (; properties; properties = TREE_CHAIN (properties))
	objc_add_property_declaration (loc, copy_node (properties),
				       property_readonly, property_readwrite,
				       property_assign, property_retain,
				       property_copy, property_nonatomic,
				       property_getter_ident, property_setter_ident);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  parser->error = false;
}
/* Parse an Objective-C @synthesize declaration. The syntax is:
objc-synthesize-declaration:
@synthesize objc-synthesize-identifier-list ;
objc-synthesize-identifier-list:
objc-synthesize-identifier
objc-synthesize-identifier-list, objc-synthesize-identifier
objc-synthesize-identifier
identifier
identifier = identifier
For example:
@synthesize MyProperty;
@synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty;
PS: This function is identical to cp_parser_objc_at_synthesize_declaration
for C++. Keep them in sync.
*/
static void
c_parser_objc_at_synthesize_declaration (c_parser *parser)
{
  tree list = NULL_TREE;
  location_t loc;
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_SYNTHESIZE));
  loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  /* Collect each "property" or "property = ivar" item.  */
  while (true)
    {
      tree property, ivar;
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Once we find the semicolon, we can resume normal parsing.
	     We have to reset parser->error manually because
	     c_parser_skip_until_found() won't reset it for us if the
	     next token is precisely a semicolon.  */
	  parser->error = false;
	  return;
	}
      property = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      /* Optional "= ivar" names the backing instance variable.  */
      if (c_parser_next_token_is (parser, CPP_EQ))
	{
	  c_parser_consume_token (parser);
	  if (c_parser_next_token_is_not (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected identifier");
	      c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	      parser->error = false;
	      return;
	    }
	  ivar = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	}
      else
	ivar = NULL_TREE;
      /* Each list entry is (ivar . property); ivar may be NULL_TREE.  */
      list = chainon (list, build_tree_list (ivar, property));
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_synthesize_declaration (loc, list);
}
/* Parse an Objective-C @dynamic declaration. The syntax is:
objc-dynamic-declaration:
@dynamic identifier-list ;
For example:
@dynamic MyProperty;
@dynamic MyProperty, AnotherProperty;
PS: This function is identical to cp_parser_objc_at_dynamic_declaration
for C++. Keep them in sync.
*/
/* Parse an Objective-C @dynamic declaration:
   "@dynamic identifier-list ;".  */
static void
c_parser_objc_at_dynamic_declaration (c_parser *parser)
{
  gcc_assert (c_parser_next_token_is_keyword (parser, RID_AT_DYNAMIC));
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);
  tree props = NULL_TREE;
  bool more = true;
  while (more)
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  c_parser_skip_until_found (parser, CPP_SEMICOLON, NULL);
	  /* Reset manually: skip_until_found does not clear the error
	     flag when the next token is already a semicolon.  */
	  parser->error = false;
	  return;
	}
      tree prop = c_parser_peek_token (parser)->value;
      props = chainon (props, build_tree_list (NULL_TREE, prop));
      c_parser_consume_token (parser);
      more = c_parser_next_token_is (parser, CPP_COMMA);
      if (more)
	c_parser_consume_token (parser);
    }
  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
  objc_add_dynamic_declaration (loc, props);
}
/* Parse a pragma GCC ivdep. */
static bool
c_parse_pragma_ivdep (c_parser *parser)
{
  /* Consume "#pragma GCC ivdep" through the end of the pragma line.
     The constant true return is used by the caller as the ivdep flag
     passed to the following loop statement's parser.  */
  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);
  return true;
}
/* Parse a pragma GCC unroll. */
/* Parse a pragma GCC unroll.  Returns the requested unroll factor:
   0 when the argument is invalid, otherwise the given constant with
   an explicit 0 mapped to 1.  */
static unsigned short
c_parser_pragma_unroll (c_parser *parser)
{
  c_parser_consume_pragma (parser);
  location_t location = c_parser_peek_token (parser)->location;
  tree expr = c_parser_expr_no_commas (parser, NULL).value;
  mark_exp_read (expr);
  expr = c_fully_fold (expr, false, NULL);
  /* The argument must be an integral constant in [0, USHRT_MAX).  */
  bool valid = (INTEGRAL_TYPE_P (TREE_TYPE (expr))
		&& TREE_CODE (expr) == INTEGER_CST);
  HOST_WIDE_INT lunroll = 0;
  if (valid)
    {
      lunroll = tree_to_shwi (expr);
      valid = lunroll >= 0 && lunroll < USHRT_MAX;
    }
  unsigned short unroll;
  if (!valid)
    {
      error_at (location, "%<#pragma GCC unroll%> requires an"
		" assignment-expression that evaluates to a non-negative"
		" integral constant less than %u", USHRT_MAX);
      unroll = 0;
    }
  else
    {
      unroll = (unsigned short)lunroll;
      /* An explicit "unroll 0" is treated the same as 1.  */
      if (unroll == 0)
	unroll = 1;
    }
  c_parser_skip_to_pragma_eol (parser);
  return unroll;
}
/* Handle pragmas. Some OpenMP pragmas are associated with, and therefore
should be considered, statements. ALLOW_STMT is true if we're within
the context of a function and such pragmas are to be allowed. Returns
true if we actually parsed such a pragma. */
static bool
c_parser_pragma (c_parser *parser, enum pragma_context context, bool *if_p)
{
unsigned int id;
const char *construct = NULL;
id = c_parser_peek_token (parser)->pragma_kind;
gcc_assert (id != PRAGMA_NONE);
switch (id)
{
case PRAGMA_OACC_DECLARE:
c_parser_oacc_declare (parser);
return false;
case PRAGMA_OACC_ENTER_DATA:
if (context != pragma_compound)
{
construct = "acc enter data";
in_compound:
if (context == pragma_stmt)
{
error_at (c_parser_peek_token (parser)->location,
"%<#pragma %s%> may only be used in compound "
"statements", construct);
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
}
goto bad_stmt;
}
c_parser_oacc_enter_exit_data (parser, true);
return false;
case PRAGMA_OACC_EXIT_DATA:
if (context != pragma_compound)
{
construct = "acc exit data";
goto in_compound;
}
c_parser_oacc_enter_exit_data (parser, false);
return false;
case PRAGMA_OACC_ROUTINE:
if (context != pragma_external)
{
error_at (c_parser_peek_token (parser)->location,
"%<#pragma acc routine%> must be at file scope");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
}
c_parser_oacc_routine (parser, context);
return false;
case PRAGMA_OACC_UPDATE:
if (context != pragma_compound)
{
construct = "acc update";
goto in_compound;
}
c_parser_oacc_update (parser);
return false;
case PRAGMA_OMP_BARRIER:
if (context != pragma_compound)
{
construct = "omp barrier";
goto in_compound;
}
c_parser_omp_barrier (parser);
return false;
case PRAGMA_OMP_DEPOBJ:
if (context != pragma_compound)
{
construct = "omp depobj";
goto in_compound;
}
c_parser_omp_depobj (parser);
return false;
case PRAGMA_OMP_FLUSH:
if (context != pragma_compound)
{
construct = "omp flush";
goto in_compound;
}
c_parser_omp_flush (parser);
return false;
case PRAGMA_OMP_TASKWAIT:
if (context != pragma_compound)
{
construct = "omp taskwait";
goto in_compound;
}
c_parser_omp_taskwait (parser);
return false;
case PRAGMA_OMP_TASKYIELD:
if (context != pragma_compound)
{
construct = "omp taskyield";
goto in_compound;
}
c_parser_omp_taskyield (parser);
return false;
case PRAGMA_OMP_CANCEL:
if (context != pragma_compound)
{
construct = "omp cancel";
goto in_compound;
}
c_parser_omp_cancel (parser);
return false;
case PRAGMA_OMP_CANCELLATION_POINT:
c_parser_omp_cancellation_point (parser, context);
return false;
case PRAGMA_OMP_THREADPRIVATE:
c_parser_omp_threadprivate (parser);
return false;
case PRAGMA_OMP_TARGET:
return c_parser_omp_target (parser, context, if_p);
case PRAGMA_OMP_END_DECLARE_TARGET:
c_parser_omp_end_declare_target (parser);
return false;
case PRAGMA_OMP_SCAN:
error_at (c_parser_peek_token (parser)->location,
"%<#pragma omp scan%> may only be used in "
"a loop construct with %<inscan%> %<reduction%> clause");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
case PRAGMA_OMP_SECTION:
error_at (c_parser_peek_token (parser)->location,
"%<#pragma omp section%> may only be used in "
"%<#pragma omp sections%> construct");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
case PRAGMA_OMP_DECLARE:
c_parser_omp_declare (parser, context);
return false;
case PRAGMA_OMP_REQUIRES:
if (context != pragma_external)
{
error_at (c_parser_peek_token (parser)->location,
"%<#pragma omp requires%> may only be used at file scope");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
}
c_parser_omp_requires (parser);
return false;
case PRAGMA_OMP_ORDERED:
return c_parser_omp_ordered (parser, context, if_p);
case PRAGMA_IVDEP:
{
const bool ivdep = c_parse_pragma_ivdep (parser);
unsigned short unroll;
if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_UNROLL)
unroll = c_parser_pragma_unroll (parser);
else
unroll = 0;
if (!c_parser_next_token_is_keyword (parser, RID_FOR)
&& !c_parser_next_token_is_keyword (parser, RID_WHILE)
&& !c_parser_next_token_is_keyword (parser, RID_DO))
{
c_parser_error (parser, "for, while or do statement expected");
return false;
}
if (c_parser_next_token_is_keyword (parser, RID_FOR))
c_parser_for_statement (parser, ivdep, unroll, if_p);
else if (c_parser_next_token_is_keyword (parser, RID_WHILE))
c_parser_while_statement (parser, ivdep, unroll, if_p);
else
c_parser_do_statement (parser, ivdep, unroll);
}
return false;
case PRAGMA_UNROLL:
{
unsigned short unroll = c_parser_pragma_unroll (parser);
bool ivdep;
if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_IVDEP)
ivdep = c_parse_pragma_ivdep (parser);
else
ivdep = false;
if (!c_parser_next_token_is_keyword (parser, RID_FOR)
&& !c_parser_next_token_is_keyword (parser, RID_WHILE)
&& !c_parser_next_token_is_keyword (parser, RID_DO))
{
c_parser_error (parser, "for, while or do statement expected");
return false;
}
if (c_parser_next_token_is_keyword (parser, RID_FOR))
c_parser_for_statement (parser, ivdep, unroll, if_p);
else if (c_parser_next_token_is_keyword (parser, RID_WHILE))
c_parser_while_statement (parser, ivdep, unroll, if_p);
else
c_parser_do_statement (parser, ivdep, unroll);
}
return false;
case PRAGMA_GCC_PCH_PREPROCESS:
c_parser_error (parser, "%<#pragma GCC pch_preprocess%> must be first");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
case PRAGMA_OACC_WAIT:
if (context != pragma_compound)
{
construct = "acc wait";
goto in_compound;
}
/* FALL THROUGH. */
default:
if (id < PRAGMA_FIRST_EXTERNAL)
{
if (context != pragma_stmt && context != pragma_compound)
{
bad_stmt:
c_parser_error (parser, "expected declaration specifiers");
c_parser_skip_until_found (parser, CPP_PRAGMA_EOL, NULL);
return false;
}
c_parser_omp_construct (parser, if_p);
return true;
}
break;
}
c_parser_consume_pragma (parser);
c_invoke_pragma_handler (id);
/* Skip to EOL, but suppress any error message. Those will have been
generated by the handler routine through calling error, as opposed
to calling c_parser_error. */
parser->error = true;
c_parser_skip_to_pragma_eol (parser);
return false;
}
/* The interface the pragma parsers have to the lexer.  Peek at the next
   token, report its value (and optionally its location) to the caller,
   and consume it unless it terminates the pragma line.  */

enum cpp_ttype
pragma_lex (tree *value, location_t *loc)
{
  c_token *next = c_parser_peek_token (the_parser);
  enum cpp_ttype kind = next->type;

  *value = next->value;
  if (loc != NULL)
    *loc = next->location;

  switch (kind)
    {
    case CPP_PRAGMA_EOL:
    case CPP_EOF:
      /* Do not consume the end marker; report both cases as EOF.  */
      kind = CPP_EOF;
      break;

    case CPP_STRING:
      /* String literals are parsed (and consumed) as a whole, so that
	 adjacent literals are joined.  */
      *value = c_parser_string_literal (the_parser, false, false).value;
      break;

    case CPP_KEYWORD:
      /* Keywords are reported to pragma handlers as plain names.  */
      kind = CPP_NAME;
      /* FALLTHRU */
    default:
      c_parser_consume_token (the_parser);
      break;
    }

  return kind;
}
/* Handle #pragma GCC pch_preprocess "FILE": record the named precompiled
   header so it can be restored by the preprocessing machinery.  */

static void
c_parser_pragma_pch_preprocess (c_parser *parser)
{
  tree fname = NULL_TREE;

  /* Adjacent string literals must be joined into one file name.  */
  parser->lex_joined_string = true;
  c_parser_consume_pragma (parser);
  if (!c_parser_next_token_is (parser, CPP_STRING))
    c_parser_error (parser, "expected string literal");
  else
    {
      fname = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
    }
  c_parser_skip_to_pragma_eol (parser);
  parser->lex_joined_string = false;

  if (fname != NULL_TREE)
    c_common_pch_pragma (parse_in, TREE_STRING_POINTER (fname));
}
/* OpenACC and OpenMP parsing routines. */
/* Returns name of the next clause.
If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and
the token is not consumed. Otherwise appropriate pragma_omp_clause is
returned and the token is consumed. */
static pragma_omp_clause
c_parser_omp_clause_name (c_parser *parser)
{
  pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE;

  /* A few clause names collide with C keywords and so arrive as keyword
     tokens rather than CPP_NAME; test those first.  */
  if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    result = PRAGMA_OACC_CLAUSE_AUTO;
  else if (c_parser_next_token_is_keyword (parser, RID_IF))
    result = PRAGMA_OMP_CLAUSE_IF;
  else if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    result = PRAGMA_OMP_CLAUSE_DEFAULT;
  else if (c_parser_next_token_is_keyword (parser, RID_FOR))
    result = PRAGMA_OMP_CLAUSE_FOR;
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* Dispatch on the first character to keep the strcmp chains short.  */
      switch (p[0])
	{
	case 'a':
	  if (!strcmp ("aligned", p))
	    result = PRAGMA_OMP_CLAUSE_ALIGNED;
	  else if (!strcmp ("async", p))
	    result = PRAGMA_OACC_CLAUSE_ASYNC;
	  else if (!strcmp ("attach", p))
	    result = PRAGMA_OACC_CLAUSE_ATTACH;
	  break;
	case 'b':
	  if (!strcmp ("bind", p))
	    result = PRAGMA_OMP_CLAUSE_BIND;
	  break;
	case 'c':
	  if (!strcmp ("collapse", p))
	    result = PRAGMA_OMP_CLAUSE_COLLAPSE;
	  else if (!strcmp ("copy", p))
	    result = PRAGMA_OACC_CLAUSE_COPY;
	  else if (!strcmp ("copyin", p))
	    result = PRAGMA_OMP_CLAUSE_COPYIN;
	  else if (!strcmp ("copyout", p))
	    result = PRAGMA_OACC_CLAUSE_COPYOUT;
	  else if (!strcmp ("copyprivate", p))
	    result = PRAGMA_OMP_CLAUSE_COPYPRIVATE;
	  else if (!strcmp ("create", p))
	    result = PRAGMA_OACC_CLAUSE_CREATE;
	  break;
	case 'd':
	  if (!strcmp ("defaultmap", p))
	    result = PRAGMA_OMP_CLAUSE_DEFAULTMAP;
	  else if (!strcmp ("delete", p))
	    result = PRAGMA_OACC_CLAUSE_DELETE;
	  else if (!strcmp ("depend", p))
	    result = PRAGMA_OMP_CLAUSE_DEPEND;
	  else if (!strcmp ("detach", p))
	    result = PRAGMA_OACC_CLAUSE_DETACH;
	  else if (!strcmp ("device", p))
	    result = PRAGMA_OMP_CLAUSE_DEVICE;
	  else if (!strcmp ("deviceptr", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICEPTR;
	  else if (!strcmp ("device_resident", p))
	    result = PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT;
	  else if (!strcmp ("device_type", p))
	    result = PRAGMA_OMP_CLAUSE_DEVICE_TYPE;
	  else if (!strcmp ("dist_schedule", p))
	    result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE;
	  break;
	case 'f':
	  if (!strcmp ("final", p))
	    result = PRAGMA_OMP_CLAUSE_FINAL;
	  else if (!strcmp ("finalize", p))
	    result = PRAGMA_OACC_CLAUSE_FINALIZE;
	  else if (!strcmp ("firstprivate", p))
	    result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE;
	  else if (!strcmp ("from", p))
	    result = PRAGMA_OMP_CLAUSE_FROM;
	  break;
	case 'g':
	  if (!strcmp ("gang", p))
	    result = PRAGMA_OACC_CLAUSE_GANG;
	  else if (!strcmp ("grainsize", p))
	    result = PRAGMA_OMP_CLAUSE_GRAINSIZE;
	  break;
	case 'h':
	  if (!strcmp ("hint", p))
	    result = PRAGMA_OMP_CLAUSE_HINT;
	  else if (!strcmp ("host", p))
	    result = PRAGMA_OACC_CLAUSE_HOST;
	  break;
	case 'i':
	  if (!strcmp ("if_present", p))
	    result = PRAGMA_OACC_CLAUSE_IF_PRESENT;
	  else if (!strcmp ("in_reduction", p))
	    result = PRAGMA_OMP_CLAUSE_IN_REDUCTION;
	  else if (!strcmp ("inbranch", p))
	    result = PRAGMA_OMP_CLAUSE_INBRANCH;
	  else if (!strcmp ("independent", p))
	    result = PRAGMA_OACC_CLAUSE_INDEPENDENT;
	  else if (!strcmp ("is_device_ptr", p))
	    result = PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR;
	  break;
	case 'l':
	  if (!strcmp ("lastprivate", p))
	    result = PRAGMA_OMP_CLAUSE_LASTPRIVATE;
	  else if (!strcmp ("linear", p))
	    result = PRAGMA_OMP_CLAUSE_LINEAR;
	  else if (!strcmp ("link", p))
	    result = PRAGMA_OMP_CLAUSE_LINK;
	  break;
	case 'm':
	  if (!strcmp ("map", p))
	    result = PRAGMA_OMP_CLAUSE_MAP;
	  else if (!strcmp ("mergeable", p))
	    result = PRAGMA_OMP_CLAUSE_MERGEABLE;
	  break;
	case 'n':
	  if (!strcmp ("no_create", p))
	    result = PRAGMA_OACC_CLAUSE_NO_CREATE;
	  else if (!strcmp ("nogroup", p))
	    result = PRAGMA_OMP_CLAUSE_NOGROUP;
	  else if (!strcmp ("nontemporal", p))
	    result = PRAGMA_OMP_CLAUSE_NONTEMPORAL;
	  else if (!strcmp ("notinbranch", p))
	    result = PRAGMA_OMP_CLAUSE_NOTINBRANCH;
	  else if (!strcmp ("nowait", p))
	    result = PRAGMA_OMP_CLAUSE_NOWAIT;
	  else if (!strcmp ("num_gangs", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_GANGS;
	  else if (!strcmp ("num_tasks", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TASKS;
	  else if (!strcmp ("num_teams", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_TEAMS;
	  else if (!strcmp ("num_threads", p))
	    result = PRAGMA_OMP_CLAUSE_NUM_THREADS;
	  else if (!strcmp ("num_workers", p))
	    result = PRAGMA_OACC_CLAUSE_NUM_WORKERS;
	  break;
	case 'o':
	  /* "ordered" must be tested before "order": both start with the
	     same prefix but are distinct clauses.  */
	  if (!strcmp ("ordered", p))
	    result = PRAGMA_OMP_CLAUSE_ORDERED;
	  else if (!strcmp ("order", p))
	    result = PRAGMA_OMP_CLAUSE_ORDER;
	  break;
	case 'p':
	  if (!strcmp ("parallel", p))
	    result = PRAGMA_OMP_CLAUSE_PARALLEL;
	  else if (!strcmp ("present", p))
	    result = PRAGMA_OACC_CLAUSE_PRESENT;
	  /* As of OpenACC 2.5, these are now aliases of the non-present_or
	     clauses.  */
	  else if (!strcmp ("present_or_copy", p)
		   || !strcmp ("pcopy", p))
	    result = PRAGMA_OACC_CLAUSE_COPY;
	  else if (!strcmp ("present_or_copyin", p)
		   || !strcmp ("pcopyin", p))
	    result = PRAGMA_OACC_CLAUSE_COPYIN;
	  else if (!strcmp ("present_or_copyout", p)
		   || !strcmp ("pcopyout", p))
	    result = PRAGMA_OACC_CLAUSE_COPYOUT;
	  else if (!strcmp ("present_or_create", p)
		   || !strcmp ("pcreate", p))
	    result = PRAGMA_OACC_CLAUSE_CREATE;
	  else if (!strcmp ("priority", p))
	    result = PRAGMA_OMP_CLAUSE_PRIORITY;
	  else if (!strcmp ("private", p))
	    result = PRAGMA_OMP_CLAUSE_PRIVATE;
	  else if (!strcmp ("proc_bind", p))
	    result = PRAGMA_OMP_CLAUSE_PROC_BIND;
	  break;
	case 'r':
	  if (!strcmp ("reduction", p))
	    result = PRAGMA_OMP_CLAUSE_REDUCTION;
	  break;
	case 's':
	  if (!strcmp ("safelen", p))
	    result = PRAGMA_OMP_CLAUSE_SAFELEN;
	  else if (!strcmp ("schedule", p))
	    result = PRAGMA_OMP_CLAUSE_SCHEDULE;
	  else if (!strcmp ("sections", p))
	    result = PRAGMA_OMP_CLAUSE_SECTIONS;
	  else if (!strcmp ("self", p)) /* "self" is a synonym for "host".  */
	    result = PRAGMA_OACC_CLAUSE_HOST;
	  else if (!strcmp ("seq", p))
	    result = PRAGMA_OACC_CLAUSE_SEQ;
	  else if (!strcmp ("shared", p))
	    result = PRAGMA_OMP_CLAUSE_SHARED;
	  else if (!strcmp ("simd", p))
	    result = PRAGMA_OMP_CLAUSE_SIMD;
	  else if (!strcmp ("simdlen", p))
	    result = PRAGMA_OMP_CLAUSE_SIMDLEN;
	  break;
	case 't':
	  if (!strcmp ("task_reduction", p))
	    result = PRAGMA_OMP_CLAUSE_TASK_REDUCTION;
	  else if (!strcmp ("taskgroup", p))
	    result = PRAGMA_OMP_CLAUSE_TASKGROUP;
	  else if (!strcmp ("thread_limit", p))
	    result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT;
	  else if (!strcmp ("threads", p))
	    result = PRAGMA_OMP_CLAUSE_THREADS;
	  else if (!strcmp ("tile", p))
	    result = PRAGMA_OACC_CLAUSE_TILE;
	  else if (!strcmp ("to", p))
	    result = PRAGMA_OMP_CLAUSE_TO;
	  break;
	case 'u':
	  if (!strcmp ("uniform", p))
	    result = PRAGMA_OMP_CLAUSE_UNIFORM;
	  else if (!strcmp ("untied", p))
	    result = PRAGMA_OMP_CLAUSE_UNTIED;
	  else if (!strcmp ("use_device", p))
	    result = PRAGMA_OACC_CLAUSE_USE_DEVICE;
	  else if (!strcmp ("use_device_addr", p))
	    result = PRAGMA_OMP_CLAUSE_USE_DEVICE_ADDR;
	  else if (!strcmp ("use_device_ptr", p))
	    result = PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR;
	  break;
	case 'v':
	  if (!strcmp ("vector", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR;
	  else if (!strcmp ("vector_length", p))
	    result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH;
	  break;
	case 'w':
	  if (!strcmp ("wait", p))
	    result = PRAGMA_OACC_CLAUSE_WAIT;
	  else if (!strcmp ("worker", p))
	    result = PRAGMA_OACC_CLAUSE_WORKER;
	  break;
	}
    }

  /* Consume the token only when it named a recognized clause, per the
     function's contract.  */
  if (result != PRAGMA_OMP_CLAUSE_NONE)
    c_parser_consume_token (parser);

  return result;
}
/* Diagnose a duplicate clause: if CLAUSES already contains a clause of
   kind CODE, emit a "too many" error at that clause's location, using
   NAME as the user-visible clause name.  */

static void
check_no_duplicate_clause (tree clauses, enum omp_clause_code code,
			   const char *name)
{
  tree c = omp_find_clause (clauses, code);
  if (c != NULL_TREE)
    error_at (OMP_CLAUSE_LOCATION (c), "too many %qs clauses", name);
}
/* OpenACC 2.0
   Parse wait clause or wait directive parameters.  Each integral
   expression becomes an OMP_CLAUSE_WAIT chained onto LIST.  */

static tree
c_parser_oacc_wait_list (c_parser *parser, location_t clause_loc, tree list)
{
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  /* Parse the parenthesized, comma-separated expression list.  */
  vec<tree, va_gc> *exprs
    = c_parser_expr_list (parser, false, true, NULL, NULL, NULL, NULL);
  tree expr_list = build_tree_list_vec (exprs);

  for (tree node = expr_list; node; node = TREE_CHAIN (node))
    {
      tree arg = TREE_VALUE (node);
      if (arg == error_mark_node)
	continue;

      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg)))
	c_parser_error (parser, "expression must be integral");
      else
	{
	  /* Chain a new wait clause onto the head of LIST.  */
	  tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT);
	  OMP_CLAUSE_DECL (c) = arg;
	  OMP_CLAUSE_CHAIN (c) = list;
	  list = c;
	}
    }

  release_tree_vector (exprs);
  parens.require_close (parser);
  return list;
}
/* OpenACC 2.0, OpenMP 2.5:
variable-list:
identifier
variable-list , identifier
If KIND is nonzero, create the appropriate node and install the
decl in OMP_CLAUSE_DECL and add the node to the head of the list.
If KIND is nonzero, CLAUSE_LOC is the location of the clause.
If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE;
return the list created.
The optional ALLOW_DEREF argument is true if list items can use the deref
(->) operator. */
static tree
c_parser_omp_variable_list (c_parser *parser,
			    location_t clause_loc,
			    enum omp_clause_code kind, tree list,
			    bool allow_deref = false)
{
  /* For OMP_CLAUSE_DEPEND, the raw tokens of each list item are stashed
     here so the item can be re-parsed as a plain expression if it turns
     out not to be an array-section form.  */
  auto_vec<c_token> tokens;
  /* Saved parser->tokens_avail while PARSER is redirected at TOKENS.  */
  unsigned int tokens_avail = 0;
  bool first = true;
  while (1)
    {
      bool array_section_p = false;
      if (kind == OMP_CLAUSE_DEPEND)
	{
	  /* A depend item that does not start with a plain identifier is
	     parsed directly as an expression.  */
	  if (c_parser_next_token_is_not (parser, CPP_NAME)
	      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
	    {
	      struct c_expr expr = c_parser_expr_no_commas (parser, NULL);
	      if (expr.value != error_mark_node)
		{
		  tree u = build_omp_clause (clause_loc, kind);
		  OMP_CLAUSE_DECL (u) = expr.value;
		  OMP_CLAUSE_CHAIN (u) = list;
		  list = u;
		}
	      if (c_parser_next_token_is_not (parser, CPP_COMMA))
		break;
	      c_parser_consume_token (parser);
	      first = false;
	      continue;
	    }
	  /* Collect all tokens up to the item-terminating comma (at
	     nesting depth zero) or the end of the pragma line, tracking
	     bracket/paren/brace nesting.  */
	  tokens.truncate (0);
	  unsigned int nesting_depth = 0;
	  while (1)
	    {
	      c_token *token = c_parser_peek_token (parser);
	      switch (token->type)
		{
		case CPP_EOF:
		case CPP_PRAGMA_EOL:
		  break;
		case CPP_OPEN_BRACE:
		case CPP_OPEN_PAREN:
		case CPP_OPEN_SQUARE:
		  ++nesting_depth;
		  goto add;
		case CPP_CLOSE_BRACE:
		case CPP_CLOSE_PAREN:
		case CPP_CLOSE_SQUARE:
		  /* An unmatched closer ends the item.  */
		  if (nesting_depth-- == 0)
		    break;
		  goto add;
		case CPP_COMMA:
		  if (nesting_depth == 0)
		    break;
		  goto add;
		default:
		add:
		  tokens.safe_push (*token);
		  c_parser_consume_token (parser);
		  continue;
		}
	      break;
	    }
	  /* Make sure nothing tries to read past the end of the tokens.  */
	  c_token eof_token;
	  memset (&eof_token, 0, sizeof (eof_token));
	  eof_token.type = CPP_EOF;
	  tokens.safe_push (eof_token);
	  tokens.safe_push (eof_token);
	  /* Redirect the parser at the stashed tokens; restored below
	     after the item has been parsed.  */
	  tokens_avail = parser->tokens_avail;
	  gcc_assert (parser->tokens == &parser->tokens_buf[0]);
	  parser->tokens = tokens.address ();
	  parser->tokens_avail = tokens.length ();
	}
      tree t = NULL_TREE;
      /* The item must begin with an identifier (or one of the predefined
	 __func__-style identifiers).  */
      if (c_parser_next_token_is (parser, CPP_NAME)
	  && c_parser_peek_token (parser)->id_kind == C_ID_ID)
	{
	  t = lookup_name (c_parser_peek_token (parser)->value);
	  if (t == NULL_TREE)
	    {
	      undeclared_variable (c_parser_peek_token (parser)->location,
				   c_parser_peek_token (parser)->value);
	      t = error_mark_node;
	    }
	  c_parser_consume_token (parser);
	}
      else if (c_parser_next_token_is (parser, CPP_KEYWORD)
	       && (c_parser_peek_token (parser)->keyword == RID_FUNCTION_NAME
		   || (c_parser_peek_token (parser)->keyword
		       == RID_PRETTY_FUNCTION_NAME)
		   || (c_parser_peek_token (parser)->keyword
		       == RID_C99_FUNCTION_NAME)))
	t = c_parser_predefined_identifier (parser).value;
      else
	{
	  /* Only complain on the first item; a bad later item just ends
	     the list.  */
	  if (first)
	    c_parser_error (parser, "expected identifier");
	  break;
	}
      if (t == error_mark_node)
	;
      else if (kind != 0)
	{
	  switch (kind)
	    {
	    case OMP_CLAUSE__CACHE_:
	      /* The OpenACC cache directive explicitly only allows "array
		 elements or subarrays".  */
	      if (c_parser_peek_token (parser)->type != CPP_OPEN_SQUARE)
		{
		  c_parser_error (parser, "expected %<[%>");
		  t = error_mark_node;
		  break;
		}
	      /* FALLTHROUGH  */
	    case OMP_CLAUSE_MAP:
	    case OMP_CLAUSE_FROM:
	    case OMP_CLAUSE_TO:
	      /* Parse trailing member accesses: ".member" and, when
		 ALLOW_DEREF, "->member".  */
	      while (c_parser_next_token_is (parser, CPP_DOT)
		     || (allow_deref
			 && c_parser_next_token_is (parser, CPP_DEREF)))
		{
		  location_t op_loc = c_parser_peek_token (parser)->location;
		  if (c_parser_next_token_is (parser, CPP_DEREF))
		    t = build_simple_mem_ref (t);
		  c_parser_consume_token (parser);
		  if (!c_parser_next_token_is (parser, CPP_NAME))
		    {
		      c_parser_error (parser, "expected identifier");
		      t = error_mark_node;
		      break;
		    }
		  c_token *comp_tok = c_parser_peek_token (parser);
		  tree ident = comp_tok->value;
		  location_t comp_loc = comp_tok->location;
		  c_parser_consume_token (parser);
		  t = build_component_ref (op_loc, t, ident, comp_loc);
		}
	      /* FALLTHROUGH  */
	    case OMP_CLAUSE_DEPEND:
	    case OMP_CLAUSE_REDUCTION:
	    case OMP_CLAUSE_IN_REDUCTION:
	    case OMP_CLAUSE_TASK_REDUCTION:
	      /* Parse array sections: [low-bound : length] or plain
		 [index] subscripts; each is chained with tree_cons.  */
	      while (c_parser_next_token_is (parser, CPP_OPEN_SQUARE))
		{
		  tree low_bound = NULL_TREE, length = NULL_TREE;
		  c_parser_consume_token (parser);
		  if (!c_parser_next_token_is (parser, CPP_COLON))
		    {
		      location_t expr_loc
			= c_parser_peek_token (parser)->location;
		      c_expr expr = c_parser_expression (parser);
		      expr = convert_lvalue_to_rvalue (expr_loc, expr,
						       false, true);
		      low_bound = expr.value;
		    }
		  /* "[expr]" is shorthand for a section of length one.  */
		  if (c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
		    length = integer_one_node;
		  else
		    {
		      /* Look for `:'.  */
		      if (!c_parser_require (parser, CPP_COLON,
					     "expected %<:%>"))
			{
			  t = error_mark_node;
			  break;
			}
		      array_section_p = true;
		      if (!c_parser_next_token_is (parser, CPP_CLOSE_SQUARE))
			{
			  location_t expr_loc
			    = c_parser_peek_token (parser)->location;
			  c_expr expr = c_parser_expression (parser);
			  expr = convert_lvalue_to_rvalue (expr_loc, expr,
							   false, true);
			  length = expr.value;
			}
		    }
		  /* Look for the closing `]'.  */
		  if (!c_parser_require (parser, CPP_CLOSE_SQUARE,
					 "expected %<]%>"))
		    {
		      t = error_mark_node;
		      break;
		    }
		  t = tree_cons (low_bound, length, t);
		}
	      /* For depend items, leftover stashed tokens (other than the
		 two EOF sentinels) mean the item was not fully consumed:
		 either diagnose (array section) or re-parse the whole
		 stash as one expression.  */
	      if (kind == OMP_CLAUSE_DEPEND
		  && t != error_mark_node
		  && parser->tokens_avail != 2)
		{
		  if (array_section_p)
		    {
		      error_at (c_parser_peek_token (parser)->location,
				"expected %<)%> or %<,%>");
		      t = error_mark_node;
		    }
		  else
		    {
		      parser->tokens = tokens.address ();
		      parser->tokens_avail = tokens.length ();
		      t = c_parser_expr_no_commas (parser, NULL).value;
		      if (t != error_mark_node && parser->tokens_avail != 2)
			{
			  error_at (c_parser_peek_token (parser)->location,
				    "expected %<)%> or %<,%>");
			  t = error_mark_node;
			}
		    }
		}
	      break;
	    default:
	      break;
	    }
	  if (t != error_mark_node)
	    {
	      tree u = build_omp_clause (clause_loc, kind);
	      OMP_CLAUSE_DECL (u) = t;
	      OMP_CLAUSE_CHAIN (u) = list;
	      list = u;
	    }
	}
      else
	/* KIND == 0: build a TREE_LIST with the decl in TREE_PURPOSE.  */
	list = tree_cons (t, NULL_TREE, list);
      /* Restore the parser's own token buffer after a depend item.  */
      if (kind == OMP_CLAUSE_DEPEND)
	{
	  parser->tokens = &parser->tokens_buf[0];
	  parser->tokens_avail = tokens_avail;
	}
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
      first = false;
    }
  return list;
}
/* Similarly, but expect leading and trailing parenthesis.  This is a very
   common case for OpenACC and OpenMP clauses.  The optional ALLOW_DEREF
   argument is true if list items can use the deref (->) operator.  */

static tree
c_parser_omp_var_list_parens (c_parser *parser, enum omp_clause_code kind,
			      tree list, bool allow_deref = false)
{
  /* The clause's location is that of the token preceding the open paren.  */
  location_t clause_loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  list = c_parser_omp_variable_list (parser, clause_loc, kind, list,
				     allow_deref);
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenACC 2.0:
   copy ( variable-list )
   copyin ( variable-list )
   copyout ( variable-list )
   create ( variable-list )
   delete ( variable-list )
   present ( variable-list )

   OpenACC 2.6:
   no_create ( variable-list )
   attach ( variable-list )
   detach ( variable-list ) */

static tree
c_parser_oacc_data_clause (c_parser *parser, pragma_omp_clause c_kind,
			   tree list)
{
  /* Translate the pragma clause kind into the map kind that every
     variable parsed below will receive.  */
  enum gomp_map_kind map_kind;
  switch (c_kind)
    {
    case PRAGMA_OACC_CLAUSE_ATTACH:
      map_kind = GOMP_MAP_ATTACH;
      break;
    case PRAGMA_OACC_CLAUSE_COPY:
      map_kind = GOMP_MAP_TOFROM;
      break;
    case PRAGMA_OACC_CLAUSE_COPYIN:
      map_kind = GOMP_MAP_TO;
      break;
    case PRAGMA_OACC_CLAUSE_COPYOUT:
      map_kind = GOMP_MAP_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_CREATE:
      map_kind = GOMP_MAP_ALLOC;
      break;
    case PRAGMA_OACC_CLAUSE_DELETE:
      map_kind = GOMP_MAP_RELEASE;
      break;
    case PRAGMA_OACC_CLAUSE_DETACH:
      map_kind = GOMP_MAP_DETACH;
      break;
    case PRAGMA_OACC_CLAUSE_DEVICE:
      map_kind = GOMP_MAP_FORCE_TO;
      break;
    case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT:
      map_kind = GOMP_MAP_DEVICE_RESIDENT;
      break;
    case PRAGMA_OACC_CLAUSE_HOST:
      map_kind = GOMP_MAP_FORCE_FROM;
      break;
    case PRAGMA_OACC_CLAUSE_LINK:
      map_kind = GOMP_MAP_LINK;
      break;
    case PRAGMA_OACC_CLAUSE_NO_CREATE:
      map_kind = GOMP_MAP_IF_PRESENT;
      break;
    case PRAGMA_OACC_CLAUSE_PRESENT:
      map_kind = GOMP_MAP_FORCE_PRESENT;
      break;
    default:
      gcc_unreachable ();
    }

  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_MAP, list, true);

  /* The freshly parsed clauses are exactly those between NL and the old
     head LIST; stamp each with the map kind chosen above.  */
  for (tree c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, map_kind);

  return nl;
}
/* OpenACC 2.0:
   deviceptr ( variable-list )

   Each variable must be a (pointer-typed) VAR_DECL or PARM_DECL; a
   GOMP_MAP_FORCE_DEVICEPTR map clause is built for it regardless, so
   error recovery can continue.  */

static tree
c_parser_oacc_data_clause_deviceptr (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree vars, t;

  /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic
     c_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR,
     variable-list must only allow for pointer variables.  */
  vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
  /* Fixed: the loop condition was the redundant "t && t".  */
  for (t = vars; t; t = TREE_CHAIN (t))
    {
      tree v = TREE_PURPOSE (t);

      /* FIXME diagnostics: Ideally we should keep individual
	 locations for all the variables in the var list to make the
	 following errors more precise.  Perhaps
	 c_parser_omp_var_list_parens() should construct a list of
	 locations to go along with the var list.  */

      if (!VAR_P (v) && TREE_CODE (v) != PARM_DECL)
	error_at (loc, "%qD is not a variable", v);
      else if (TREE_TYPE (v) == error_mark_node)
	;
      else if (!POINTER_TYPE_P (TREE_TYPE (v)))
	error_at (loc, "%qD is not a pointer variable", v);

      tree u = build_omp_clause (loc, OMP_CLAUSE_MAP);
      OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR);
      OMP_CLAUSE_DECL (u) = v;
      OMP_CLAUSE_CHAIN (u) = list;
      list = u;
    }

  return list;
}
/* OpenACC 2.0, OpenMP 3.0:
   collapse ( constant-expression ) */

static tree
c_parser_omp_clause_collapse (c_parser *parser, tree list)
{
  /* collapse conflicts both with itself and with tile.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
  check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile");

  location_t loc = c_parser_peek_token (parser)->location;
  tree num = error_mark_node;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      num = c_parser_expr_no_commas (parser, NULL).value;
      parens.skip_until_found_close (parser);
    }
  if (num == error_mark_node)
    return list;

  mark_exp_read (num);
  num = c_fully_fold (num, false, NULL);

  /* The argument must be a positive integer constant that fits in an
     int.  */
  HOST_WIDE_INT n;
  if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
      || !tree_fits_shwi_p (num)
      || (n = tree_to_shwi (num)) <= 0
      || (int) n != n)
    {
      error_at (loc,
		"collapse argument needs positive constant integer expression");
      return list;
    }

  tree c = build_omp_clause (loc, OMP_CLAUSE_COLLAPSE);
  OMP_CLAUSE_COLLAPSE_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   copyin ( variable-list ) */

static tree
c_parser_omp_clause_copyin (c_parser *parser, tree list)
{
  /* Parse the parenthesized variable list and chain OMP_CLAUSE_COPYIN
     nodes onto LIST.  */
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYIN, list);
}
/* OpenMP 2.5:
   copyprivate ( variable-list ) */

static tree
c_parser_omp_clause_copyprivate (c_parser *parser, tree list)
{
  /* Parse the parenthesized variable list and chain OMP_CLAUSE_COPYPRIVATE
     nodes onto LIST.  */
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_COPYPRIVATE, list);
}
/* OpenMP 2.5:
   default ( none | shared )

   OpenACC:
   default ( none | present ) */

static tree
c_parser_omp_clause_default (c_parser *parser, tree list, bool is_oacc)
{
  enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED;
  location_t loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  /* Recognize the argument keyword; which ones are valid depends on
     whether this is an OpenACC or an OpenMP clause.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("none", p) == 0)
	kind = OMP_CLAUSE_DEFAULT_NONE;
      else if (is_oacc && strcmp ("present", p) == 0)
	kind = OMP_CLAUSE_DEFAULT_PRESENT;
      else if (!is_oacc && strcmp ("shared", p) == 0)
	kind = OMP_CLAUSE_DEFAULT_SHARED;
    }

  /* Only a recognized keyword is consumed; otherwise diagnose with the
     dialect-appropriate message.  */
  if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    c_parser_consume_token (parser);
  else if (is_oacc)
    c_parser_error (parser, "expected %<none%> or %<present%>");
  else
    c_parser_error (parser, "expected %<none%> or %<shared%>");

  parens.skip_until_found_close (parser);
  if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default");

  tree c = build_omp_clause (loc, OMP_CLAUSE_DEFAULT);
  OMP_CLAUSE_CHAIN (c) = list;
  OMP_CLAUSE_DEFAULT_KIND (c) = kind;
  return c;
}
/* OpenMP 2.5:
   firstprivate ( variable-list ) */

static tree
c_parser_omp_clause_firstprivate (c_parser *parser, tree list)
{
  /* Parse the parenthesized variable list and chain OMP_CLAUSE_FIRSTPRIVATE
     nodes onto LIST.  */
  return c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FIRSTPRIVATE, list);
}
/* OpenMP 3.1:
   final ( expression ) */

static tree
c_parser_omp_clause_final (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;

  if (!c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_error (parser, "expected %<(%>");
      return list;
    }

  matching_parens parens;
  tree t;
  if (!parens.require_open (parser))
    t = error_mark_node;
  else
    {
      /* Parse the controlling expression and fold it to a truth value.  */
      location_t eloc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      t = convert_lvalue_to_rvalue (eloc, expr, true, true).value;
      t = c_objc_common_truthvalue_conversion (eloc, t);
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final");

  tree c = build_omp_clause (loc, OMP_CLAUSE_FINAL);
  OMP_CLAUSE_FINAL_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC, OpenMP 2.5:
if ( expression )
OpenMP 4.5:
if ( directive-name-modifier : expression )
directive-name-modifier:
parallel | task | taskloop | target data | target | target update
| target enter data | target exit data
OpenMP 5.0:
directive-name-modifier:
... | simd | cancel */
static tree
c_parser_omp_clause_if (c_parser *parser, tree list, bool is_omp)
{
  location_t location = c_parser_peek_token (parser)->location;
  /* VOID_CST is (ab)used as the marker for the "cancel" modifier, since
     it is not one of the OMP_* codes used for the other modifiers.  */
  enum tree_code if_modifier = ERROR_MARK;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  /* Only OpenMP "if" accepts a directive-name-modifier prefix.  */
  if (is_omp && c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* N counts the tokens making up the modifier (used to decide
	 whether a missing ":" deserves a diagnostic).  */
      int n = 2;
      if (strcmp (p, "cancel") == 0)
	if_modifier = VOID_CST;
      else if (strcmp (p, "parallel") == 0)
	if_modifier = OMP_PARALLEL;
      else if (strcmp (p, "simd") == 0)
	if_modifier = OMP_SIMD;
      else if (strcmp (p, "task") == 0)
	if_modifier = OMP_TASK;
      else if (strcmp (p, "taskloop") == 0)
	if_modifier = OMP_TASKLOOP;
      else if (strcmp (p, "target") == 0)
	{
	  if_modifier = OMP_TARGET;
	  /* "target" must be followed by "data", "update", "enter" or
	     "exit"; plain "if (target : ...)" is rejected here.  */
	  if (c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	    {
	      p = IDENTIFIER_POINTER (c_parser_peek_2nd_token (parser)->value);
	      if (strcmp ("data", p) == 0)
		if_modifier = OMP_TARGET_DATA;
	      else if (strcmp ("update", p) == 0)
		if_modifier = OMP_TARGET_UPDATE;
	      else if (strcmp ("enter", p) == 0)
		if_modifier = OMP_TARGET_ENTER_DATA;
	      else if (strcmp ("exit", p) == 0)
		if_modifier = OMP_TARGET_EXIT_DATA;
	      if (if_modifier != OMP_TARGET)
		{
		  n = 3;
		  c_parser_consume_token (parser);
		}
	      else
		{
		  location_t loc = c_parser_peek_2nd_token (parser)->location;
		  error_at (loc, "expected %<data%>, %<update%>, %<enter%> "
				 "or %<exit%>");
		  if_modifier = ERROR_MARK;
		}
	      /* "target enter"/"target exit" additionally require a
		 trailing "data".  */
	      if (if_modifier == OMP_TARGET_ENTER_DATA
		  || if_modifier == OMP_TARGET_EXIT_DATA)
		{
		  if (c_parser_peek_2nd_token (parser)->type == CPP_NAME)
		    {
		      p = IDENTIFIER_POINTER
			    (c_parser_peek_2nd_token (parser)->value);
		      if (strcmp ("data", p) == 0)
			n = 4;
		    }
		  if (n == 4)
		    c_parser_consume_token (parser);
		  else
		    {
		      location_t loc
			= c_parser_peek_2nd_token (parser)->location;
		      error_at (loc, "expected %<data%>");
		      if_modifier = ERROR_MARK;
		    }
		}
	    }
	}
      if (if_modifier != ERROR_MARK)
	{
	  if (c_parser_peek_2nd_token (parser)->type == CPP_COLON)
	    {
	      /* Consume the final modifier token and the ":".  */
	      c_parser_consume_token (parser);
	      c_parser_consume_token (parser);
	    }
	  else
	    {
	      /* Without the colon this was not a modifier after all; a
		 multi-token modifier without ":" is diagnosed, a single
		 name is silently treated as the start of the expression.  */
	      if (n > 2)
		{
		  location_t loc = c_parser_peek_2nd_token (parser)->location;
		  error_at (loc, "expected %<:%>");
		}
	      if_modifier = ERROR_MARK;
	    }
	}
    }

  location_t loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (loc, expr, true, true);
  tree t = c_objc_common_truthvalue_conversion (loc, expr.value), c;
  t = c_fully_fold (t, false, NULL);

  parens.skip_until_found_close (parser);

  /* Check the existing "if" clauses for duplicates and for an illegal
     mix of modified and unmodified forms.  */
  for (c = list; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IF)
      {
	if (if_modifier != ERROR_MARK
	    && OMP_CLAUSE_IF_MODIFIER (c) == if_modifier)
	  {
	    const char *p = NULL;
	    switch (if_modifier)
	      {
	      case VOID_CST: p = "cancel"; break;
	      case OMP_PARALLEL: p = "parallel"; break;
	      case OMP_SIMD: p = "simd"; break;
	      case OMP_TASK: p = "task"; break;
	      case OMP_TASKLOOP: p = "taskloop"; break;
	      case OMP_TARGET_DATA: p = "target data"; break;
	      case OMP_TARGET: p = "target"; break;
	      case OMP_TARGET_UPDATE: p = "target update"; break;
	      case OMP_TARGET_ENTER_DATA: p = "target enter data"; break;
	      case OMP_TARGET_EXIT_DATA: p = "target exit data"; break;
	      default: gcc_unreachable ();
	      }
	    error_at (location, "too many %<if%> clauses with %qs modifier",
		      p);
	    return list;
	  }
	else if (OMP_CLAUSE_IF_MODIFIER (c) == if_modifier)
	  {
	    if (!is_omp)
	      error_at (location, "too many %<if%> clauses");
	    else
	      error_at (location, "too many %<if%> clauses without modifier");
	    return list;
	  }
	else if (if_modifier == ERROR_MARK
		 || OMP_CLAUSE_IF_MODIFIER (c) == ERROR_MARK)
	  {
	    error_at (location, "if any %<if%> clause has modifier, then all "
				"%<if%> clauses have to use modifier");
	    return list;
	  }
      }

  c = build_omp_clause (location, OMP_CLAUSE_IF);
  OMP_CLAUSE_IF_MODIFIER (c) = if_modifier;
  OMP_CLAUSE_IF_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   lastprivate ( variable-list )

   OpenMP 5.0:
   lastprivate ( [ lastprivate-modifier : ] variable-list ) */

static tree
c_parser_omp_clause_lastprivate (c_parser *parser, tree list)
{
  /* The clause's location.  */
  location_t clause_loc = c_parser_peek_token (parser)->location;

  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;

  /* Check for the optional "conditional :" modifier.  */
  bool conditional_p = false;
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      tree id = c_parser_peek_token (parser)->value;
      if (strcmp (IDENTIFIER_POINTER (id), "conditional") == 0)
	{
	  conditional_p = true;
	  c_parser_consume_token (parser);
	  c_parser_consume_token (parser);
	}
    }

  tree nlist = c_parser_omp_variable_list (parser, clause_loc,
					   OMP_CLAUSE_LASTPRIVATE, list);
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, "expected %<)%>");

  /* The new clauses are those between NLIST and the old head LIST.  */
  if (conditional_p)
    for (tree c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c))
      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c) = 1;
  return nlist;
}
/* OpenMP 3.1:
   mergeable

   PARSER is used for the clause location; the stale ATTRIBUTE_UNUSED
   marking has been dropped since the parameter is in fact used.  */

static tree
c_parser_omp_clause_mergeable (c_parser *parser, tree list)
{
  tree c;

  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable");

  c = build_omp_clause (c_parser_peek_token (parser)->location,
			OMP_CLAUSE_MERGEABLE);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   nowait

   PARSER is used for the clause location; the stale ATTRIBUTE_UNUSED
   marking has been dropped since the parameter is in fact used.  */

static tree
c_parser_omp_clause_nowait (c_parser *parser, tree list)
{
  tree c;
  location_t loc = c_parser_peek_token (parser)->location;

  check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait");

  c = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   num_threads ( expression ) */

static tree
c_parser_omp_clause_num_threads (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;

  matching_parens parens;
  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree t = c_fully_fold (expr.value, false, NULL);
  parens.skip_until_found_close (parser);

  if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      c_parser_error (parser, "expected integer expression");
      return list;
    }

  /* Attempt to statically determine when the number isn't positive.  */
  tree cond = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			       build_int_cst (TREE_TYPE (t), 0));
  protected_set_expr_location (cond, expr_loc);
  if (cond == boolean_true_node)
    {
      warning_at (expr_loc, 0,
		  "%<num_threads%> value must be positive");
      t = integer_one_node;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads");

  tree c = build_omp_clause (clause_loc, OMP_CLAUSE_NUM_THREADS);
  OMP_CLAUSE_NUM_THREADS_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.5:
   num_tasks ( expression ) */

static tree
c_parser_omp_clause_num_tasks (c_parser *parser, tree list)
{
  location_t num_tasks_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}

      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* protected_set_expr_location performs the CAN_HAVE_LOCATION_P
	 check itself; used here for consistency with
	 c_parser_omp_clause_num_threads.  */
      protected_set_expr_location (c, expr_loc);
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<num_tasks%> value must be positive");
	  t = integer_one_node;
	}

      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TASKS, "num_tasks");

      c = build_omp_clause (num_tasks_loc, OMP_CLAUSE_NUM_TASKS);
      OMP_CLAUSE_NUM_TASKS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.5:
   grainsize ( expression )

   Parse the argument, require it to have integer type, warn (and clamp
   to 1) when it is provably non-positive, and prepend a GRAINSIZE
   clause to LIST.  On a parse error LIST is returned unchanged.  */

static tree
c_parser_omp_clause_grainsize (c_parser *parser, tree list)
{
  /* Location of the clause keyword, used for the clause node itself.  */
  location_t grainsize_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
      /* The argument must have integral type.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}
      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* Same as the open-coded CAN_HAVE_LOCATION_P/SET_EXPR_LOCATION
	 pair, written via the helper for consistency with
	 c_parser_omp_clause_num_threads.  */
      protected_set_expr_location (c, expr_loc);
      /* If "t <= 0" folded to true the value is known non-positive:
	 warn and substitute 1 so downstream code sees a valid value.  */
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<grainsize%> value must be positive");
	  t = integer_one_node;
	}
      check_no_duplicate_clause (list, OMP_CLAUSE_GRAINSIZE, "grainsize");
      c = build_omp_clause (grainsize_loc, OMP_CLAUSE_GRAINSIZE);
      OMP_CLAUSE_GRAINSIZE_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.5:
   priority ( expression )

   Parse the argument, require it to have integer type, warn (and clamp
   to 1) when it is provably negative, and prepend a PRIORITY clause to
   LIST.  On a parse error LIST is returned unchanged.  Unlike the
   num_threads-style clauses, priority allows zero (LT_EXPR below).  */

static tree
c_parser_omp_clause_priority (c_parser *parser, tree list)
{
  /* Location of the clause keyword, used for the clause node itself.  */
  location_t priority_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
      /* The argument must have integral type.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}
      /* Attempt to statically determine when the number isn't
	 non-negative.  */
      c = fold_build2_loc (expr_loc, LT_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      /* Same as the open-coded CAN_HAVE_LOCATION_P/SET_EXPR_LOCATION
	 pair, written via the helper for consistency with
	 c_parser_omp_clause_num_threads.  */
      protected_set_expr_location (c, expr_loc);
      /* If "t < 0" folded to true the value is known negative: warn and
	 substitute 1 so downstream code sees a valid value.  */
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<priority%> value must be non-negative");
	  t = integer_one_node;
	}
      check_no_duplicate_clause (list, OMP_CLAUSE_PRIORITY, "priority");
      c = build_omp_clause (priority_loc, OMP_CLAUSE_PRIORITY);
      OMP_CLAUSE_PRIORITY_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.5:
   hint ( expression )

   The argument must be a non-negative integer constant expression;
   anything else is rejected and LIST is returned unchanged.  */

static tree
c_parser_omp_clause_hint (c_parser *parser, tree list)
{
  location_t hint_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      /* Reject non-integral, non-constant or negative arguments.
	 NOTE(review): this error path returns without skipping to the
	 matching close paren, leaving ')' in the token stream --
	 confirm this is the intended error recovery.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
	  || TREE_CODE (t) != INTEGER_CST
	  || tree_int_cst_sgn (t) == -1)
	{
	  c_parser_error (parser, "expected constant integer expression "
				  "with valid sync-hint value");
	  return list;
	}
      parens.skip_until_found_close (parser);
      check_no_duplicate_clause (list, OMP_CLAUSE_HINT, "hint");
      c = build_omp_clause (hint_loc, OMP_CLAUSE_HINT);
      OMP_CLAUSE_HINT_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.5:
   defaultmap ( tofrom : scalar )
   OpenMP 5.0:
   defaultmap ( implicit-behavior [ : variable-category ] )

   Parse an implicit-behavior keyword, an optional variable-category,
   diagnose clauses whose categories overlap an earlier defaultmap
   clause, and prepend a DEFAULTMAP clause to LIST.  */

static tree
c_parser_omp_clause_defaultmap (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;
  const char *p;
  enum omp_clause_defaultmap_kind behavior = OMP_CLAUSE_DEFAULTMAP_DEFAULT;
  enum omp_clause_defaultmap_kind category
    = OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* "default" is a keyword, not a CPP_NAME, so handle it separately.  */
  if (c_parser_next_token_is_keyword (parser, RID_DEFAULT))
    p = "default";
  else if (!c_parser_next_token_is (parser, CPP_NAME))
    {
    invalid_behavior:
      c_parser_error (parser, "expected %<alloc%>, %<to%>, %<from%>, "
			      "%<tofrom%>, %<firstprivate%>, %<none%> "
			      "or %<default%>");
      goto out_err;
    }
  else
    p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  /* Dispatch on the first character to keep the strcmp chains short.  */
  switch (p[0])
    {
    case 'a':
      if (strcmp ("alloc", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_ALLOC;
      else
	goto invalid_behavior;
      break;
    case 'd':
      if (strcmp ("default", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_DEFAULT;
      else
	goto invalid_behavior;
      break;
    case 'f':
      if (strcmp ("firstprivate", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE;
      else if (strcmp ("from", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_FROM;
      else
	goto invalid_behavior;
      break;
    case 'n':
      if (strcmp ("none", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_NONE;
      else
	goto invalid_behavior;
      break;
    case 't':
      /* Check "tofrom" before "to" since both start with 't'.  */
      if (strcmp ("tofrom", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_TOFROM;
      else if (strcmp ("to", p) == 0)
	behavior = OMP_CLAUSE_DEFAULTMAP_TO;
      else
	goto invalid_behavior;
      break;
    default:
      goto invalid_behavior;
    }
  c_parser_consume_token (parser);
  /* An optional ": variable-category" may follow the behavior.  */
  if (!c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
    {
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	goto out_err;
      if (!c_parser_next_token_is (parser, CPP_NAME))
	{
	invalid_category:
	  c_parser_error (parser, "expected %<scalar%>, %<aggregate%> or "
				  "%<pointer%>");
	  goto out_err;
	}
      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      switch (p[0])
	{
	case 'a':
	  if (strcmp ("aggregate", p) == 0)
	    category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE;
	  else
	    goto invalid_category;
	  break;
	case 'p':
	  if (strcmp ("pointer", p) == 0)
	    category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER;
	  else
	    goto invalid_category;
	  break;
	case 's':
	  if (strcmp ("scalar", p) == 0)
	    category = OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR;
	  else
	    goto invalid_category;
	  break;
	default:
	  goto invalid_category;
	}
      c_parser_consume_token (parser);
    }
  parens.skip_until_found_close (parser);
  /* Two defaultmap clauses conflict when either one has an unspecified
     category or both name the same category.  Diagnose at the location
     of the earlier, conflicting clause.  */
  for (c = list; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEFAULTMAP
	&& (category == OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED
	    || OMP_CLAUSE_DEFAULTMAP_CATEGORY (c) == category
	    || (OMP_CLAUSE_DEFAULTMAP_CATEGORY (c)
		== OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED)))
      {
	enum omp_clause_defaultmap_kind cat = category;
	location_t loc = OMP_CLAUSE_LOCATION (c);
	/* Name the more specific of the two categories in the error.  */
	if (cat == OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED)
	  cat = OMP_CLAUSE_DEFAULTMAP_CATEGORY (c);
	p = NULL;
	switch (cat)
	  {
	  case OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED:
	    p = NULL;
	    break;
	  case OMP_CLAUSE_DEFAULTMAP_CATEGORY_AGGREGATE:
	    p = "aggregate";
	    break;
	  case OMP_CLAUSE_DEFAULTMAP_CATEGORY_POINTER:
	    p = "pointer";
	    break;
	  case OMP_CLAUSE_DEFAULTMAP_CATEGORY_SCALAR:
	    p = "scalar";
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (p)
	  error_at (loc, "too many %<defaultmap%> clauses with %qs category",
		    p);
	else
	  error_at (loc, "too many %<defaultmap%> clauses with unspecified "
			 "category");
	break;
      }
  /* The clause is built even after the "too many" diagnostic above.  */
  c = build_omp_clause (loc, OMP_CLAUSE_DEFAULTMAP);
  OMP_CLAUSE_DEFAULTMAP_SET_KIND (c, behavior, category);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 out_err:
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenACC 2.0:
   use_device ( variable-list )
   OpenMP 4.5:
   use_device_ptr ( variable-list ) */

static tree
c_parser_omp_clause_use_device_ptr (c_parser *parser, tree list)
{
  /* Delegate to the common parenthesized variable-list parser, which
     prepends one clause per variable onto LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_USE_DEVICE_PTR,
					  list);
  return nl;
}
/* OpenMP 5.0:
   use_device_addr ( variable-list ) */

static tree
c_parser_omp_clause_use_device_addr (c_parser *parser, tree list)
{
  /* Delegate to the common parenthesized variable-list parser, which
     prepends one clause per variable onto LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_USE_DEVICE_ADDR,
					  list);
  return nl;
}
/* OpenMP 4.5:
   is_device_ptr ( variable-list ) */

static tree
c_parser_omp_clause_is_device_ptr (c_parser *parser, tree list)
{
  /* Delegate to the common parenthesized variable-list parser, which
     prepends one clause per variable onto LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_IS_DEVICE_PTR,
					  list);
  return nl;
}
/* OpenACC:
   num_gangs ( expression )
   num_workers ( expression )
   vector_length ( expression )

   Common helper for OpenACC clauses whose payload is a single integer
   expression.  CODE selects which clause is built; the clause name used
   in diagnostics comes from omp_clause_code_name.  */

static tree
c_parser_oacc_single_int_clause (c_parser *parser, omp_clause_code code,
				 tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  location_t expr_loc = c_parser_peek_token (parser)->location;
  /* Note: a full expression (commas allowed) is parsed here, unlike the
     expr_no_commas used by the OpenMP single-int clauses.  */
  c_expr expr = c_parser_expression (parser);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree c, t = expr.value;
  t = c_fully_fold (t, false, NULL);
  parens.skip_until_found_close (parser);
  if (t == error_mark_node)
    return list;
  else if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
    {
      error_at (expr_loc, "%qs expression must be integral",
		omp_clause_code_name[code]);
      return list;
    }
  /* Attempt to statically determine when the number isn't positive.  */
  c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
  protected_set_expr_location (c, expr_loc);
  /* A provably non-positive value is warned about and replaced by 1.  */
  if (c == boolean_true_node)
    {
      warning_at (expr_loc, 0,
		  "%qs value must be positive",
		  omp_clause_code_name[code]);
      t = integer_one_node;
    }
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);
  c = build_omp_clause (loc, code);
  OMP_CLAUSE_OPERAND (c, 0) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
   gang [( gang-arg-list )]
   worker [( [num:] int-expr )]
   vector [( [length:] int-expr )]

   where gang-arg is one of:
   [num:] int-expr
   static: size-expr

   and size-expr may be:
   *
   int-expr

   Parse one of the gang/worker/vector shape clauses into a clause of
   kind KIND at location LOC, prepended to LIST.  STR is the clause name
   for diagnostics.  ops[0] receives the num/length argument, ops[1] the
   gang "static:" argument.  */

static tree
c_parser_oacc_shape_clause (c_parser *parser, location_t loc,
			    omp_clause_code kind,
			    const char *str, tree list)
{
  /* Name of the optional argument prefix: "num:" except for vector,
     which uses "length:".  */
  const char *id = "num";
  tree ops[2] = { NULL_TREE, NULL_TREE }, c;
  if (kind == OMP_CLAUSE_VECTOR)
    id = "length";
  /* The whole argument list is optional.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      do
	{
	  c_token *next = c_parser_peek_token (parser);
	  int idx = 0;
	  /* Gang static argument.  */
	  if (kind == OMP_CLAUSE_GANG
	      && c_parser_next_token_is_keyword (parser, RID_STATIC))
	    {
	      c_parser_consume_token (parser);
	      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
		goto cleanup_error;
	      idx = 1;
	      if (ops[idx] != NULL_TREE)
		{
		  c_parser_error (parser, "too many %<static%> arguments");
		  goto cleanup_error;
		}
	      /* Check for the '*' argument.  The lookahead for ',' or
		 ')' distinguishes it from a multiplication.  */
	      if (c_parser_next_token_is (parser, CPP_MULT)
		  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
		      || c_parser_peek_2nd_token (parser)->type
			 == CPP_CLOSE_PAREN))
		{
		  c_parser_consume_token (parser);
		  /* "static: *" is encoded as -1.  */
		  ops[idx] = integer_minus_one_node;
		  if (c_parser_next_token_is (parser, CPP_COMMA))
		    {
		      c_parser_consume_token (parser);
		      continue;
		    }
		  else
		    break;
		}
	    }
	  /* Worker num: argument and vector length: arguments.  */
	  else if (c_parser_next_token_is (parser, CPP_NAME)
		   && strcmp (id, IDENTIFIER_POINTER (next->value)) == 0
		   && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
	    {
	      c_parser_consume_token (parser);  /* id  */
	      c_parser_consume_token (parser);  /* ':'  */
	    }
	  /* Now collect the actual argument.  */
	  if (ops[idx] != NULL_TREE)
	    {
	      c_parser_error (parser, "unexpected argument");
	      goto cleanup_error;
	    }
	  location_t expr_loc = c_parser_peek_token (parser)->location;
	  c_expr cexpr = c_parser_expr_no_commas (parser, NULL);
	  cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true);
	  tree expr = cexpr.value;
	  if (expr == error_mark_node)
	    goto cleanup_error;
	  expr = c_fully_fold (expr, false, NULL);
	  /* Attempt to statically determine when the number isn't a
	     positive integer.  */
	  /* NOTE(review): unlike the other error paths this returns
	     without skipping to the close paren -- confirm the error
	     recovery is intentional.  */
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (expr)))
	    {
	      c_parser_error (parser, "expected integer expression");
	      return list;
	    }
	  tree c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, expr,
				    build_int_cst (TREE_TYPE (expr), 0));
	  /* A provably non-positive value is warned about (at the clause
	     location) and replaced by 1.  */
	  if (c == boolean_true_node)
	    {
	      warning_at (loc, 0,
			  "%qs value must be positive", str);
	      expr = integer_one_node;
	    }
	  ops[idx] = expr;
	  /* Only gang accepts a comma-separated argument list.  */
	  if (kind == OMP_CLAUSE_GANG
	      && c_parser_next_token_is (parser, CPP_COMMA))
	    {
	      c_parser_consume_token (parser);
	      continue;
	    }
	  break;
	}
      while (1);
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	goto cleanup_error;
    }
  check_no_duplicate_clause (list, kind, str);
  c = build_omp_clause (loc, kind);
  if (ops[1])
    OMP_CLAUSE_OPERAND (c, 1) = ops[1];
  OMP_CLAUSE_OPERAND (c, 0) = ops[0];
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 cleanup_error:
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenACC 2.5:
   auto
   finalize
   independent
   nohost
   seq */

static tree
c_parser_oacc_simple_clause (location_t loc, enum omp_clause_code code,
			     tree list)
{
  /* These clauses carry no arguments; reject duplicates and chain a
     bare clause of kind CODE onto LIST.  */
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);
  tree clause = build_omp_clause (loc, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenACC:
   async [( int-expr )]

   With no argument, the queue defaults to GOMP_ASYNC_NOVAL.  */

static tree
c_parser_oacc_clause_async (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;
  /* Default argument, used when no parenthesized expression follows.  */
  t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL);
  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    {
      c_parser_consume_token (parser);
      t = c_parser_expression (parser).value;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	c_parser_error (parser, "expected integer expression");
      else if (t == error_mark_node
	       || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	return list;
    }
  else
    /* NOTE(review): only the default constant goes through
       c_fully_fold here; a user-supplied expression is used unfolded,
       unlike the other integer-argument clauses -- confirm intended.  */
    t = c_fully_fold (t, false, NULL);
  check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async");
  c = build_omp_clause (loc, OMP_CLAUSE_ASYNC);
  OMP_CLAUSE_ASYNC_EXPR (c) = t;
  OMP_CLAUSE_CHAIN (c) = list;
  list = c;
  return list;
}
/* OpenACC 2.0:
   tile ( size-expr-list )

   Each size-expr is either '*' or a positive integral constant.  The
   '*' form (and any invalid entry, after a diagnostic) is encoded as
   integer_zero_node in the resulting TILE list.  */

static tree
c_parser_oacc_clause_tile (c_parser *parser, tree list)
{
  tree c, expr = error_mark_node;
  location_t loc;
  tree tile = NULL_TREE;
  /* tile and collapse are mutually exclusive, so both duplicate checks
     are run against LIST.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_TILE, "tile");
  check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse");
  loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    return list;
  do
    {
      /* After the first element, entries must be comma-separated.  */
      if (tile && !c_parser_require (parser, CPP_COMMA, "expected %<,%>"))
	return list;
      /* '*' is only recognized when followed by ',' or ')', which
	 distinguishes it from a multiplication expression.  */
      if (c_parser_next_token_is (parser, CPP_MULT)
	  && (c_parser_peek_2nd_token (parser)->type == CPP_COMMA
	      || c_parser_peek_2nd_token (parser)->type == CPP_CLOSE_PAREN))
	{
	  c_parser_consume_token (parser);
	  expr = integer_zero_node;
	}
      else
	{
	  location_t expr_loc = c_parser_peek_token (parser)->location;
	  c_expr cexpr = c_parser_expr_no_commas (parser, NULL);
	  cexpr = convert_lvalue_to_rvalue (expr_loc, cexpr, false, true);
	  expr = cexpr.value;
	  if (expr == error_mark_node)
	    {
	      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
					 "expected %<)%>");
	      return list;
	    }
	  expr = c_fully_fold (expr, false, NULL);
	  /* Each explicit size must be a positive constant that fits in
	     a signed HOST_WIDE_INT.  */
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
	      || !tree_fits_shwi_p (expr)
	      || tree_to_shwi (expr) <= 0)
	    {
	      error_at (expr_loc, "%<tile%> argument needs positive"
				  " integral constant");
	      expr = integer_zero_node;
	    }
	}
      /* Sizes are collected in reverse; nreverse below restores order.  */
      tile = tree_cons (NULL_TREE, expr, tile);
    }
  while (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN));
  /* Consume the trailing ')'.  */
  c_parser_consume_token (parser);
  c = build_omp_clause (loc, OMP_CLAUSE_TILE);
  tile = nreverse (tile);
  OMP_CLAUSE_TILE_LIST (c) = tile;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenACC:
   wait [( int-expr-list )] */

static tree
c_parser_oacc_clause_wait (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  /* With an argument list, delegate parsing of the int-expr-list.  */
  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    return c_parser_oacc_wait_list (parser, clause_loc, list);
  /* Bare "wait": build a single clause whose queue defaults to
     GOMP_ASYNC_NOVAL.  */
  tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT);
  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL);
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 5.0:
   order ( concurrent ) */

static tree
c_parser_omp_clause_order (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* "concurrent" is the only argument currently accepted.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value),
		 "concurrent") == 0)
    {
      c_parser_consume_token (parser);
      parens.skip_until_found_close (parser);
      /* check_no_duplicate_clause (list, OMP_CLAUSE_ORDER, "order"); */
      tree c = build_omp_clause (loc, OMP_CLAUSE_ORDER);
      OMP_CLAUSE_CHAIN (c) = list;
      return c;
    }
  c_parser_error (parser, "expected %<concurrent%>");
  parens.skip_until_found_close (parser);
  return list;
}
/* OpenMP 5.0:
   bind ( teams | parallel | thread )

   Parse the binding kind and prepend a BIND clause to LIST.  */

static tree
c_parser_omp_clause_bind (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree c;
  const char *p;
  enum omp_clause_bind_kind kind = OMP_CLAUSE_BIND_THREAD;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  if (!c_parser_next_token_is (parser, CPP_NAME))
    {
    invalid:
      c_parser_error (parser,
		      "expected %<teams%>, %<parallel%> or %<thread%>");
      parens.skip_until_found_close (parser);
      return list;
    }
  p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  if (strcmp (p, "teams") == 0)
    kind = OMP_CLAUSE_BIND_TEAMS;
  else if (strcmp (p, "parallel") == 0)
    kind = OMP_CLAUSE_BIND_PARALLEL;
  /* "thread" keeps the default OMP_CLAUSE_BIND_THREAD; any other
     identifier is rejected.  */
  else if (strcmp (p, "thread") != 0)
    goto invalid;
  c_parser_consume_token (parser);
  parens.skip_until_found_close (parser);
  /* check_no_duplicate_clause (list, OMP_CLAUSE_BIND, "bind"); */
  c = build_omp_clause (loc, OMP_CLAUSE_BIND);
  OMP_CLAUSE_BIND_KIND (c) = kind;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   ordered

   OpenMP 4.5:
   ordered ( constant-expression )

   The optional argument must be a positive constant integer that also
   fits in an int; otherwise LIST is returned unchanged after a
   diagnostic.  */

static tree
c_parser_omp_clause_ordered (c_parser *parser, tree list)
{
  check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered");
  tree c, num = NULL_TREE;
  HOST_WIDE_INT n;
  location_t loc = c_parser_peek_token (parser)->location;
  /* The parenthesized argument is optional; NUM stays NULL_TREE for the
     bare "ordered" form.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      matching_parens parens;
      parens.consume_open (parser);
      num = c_parser_expr_no_commas (parser, NULL).value;
      parens.skip_until_found_close (parser);
    }
  if (num == error_mark_node)
    return list;
  if (num)
    {
      mark_exp_read (num);
      num = c_fully_fold (num, false, NULL);
      /* Require a positive constant; "(int) n != n" rejects values that
	 do not fit in an int.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (num))
	  || !tree_fits_shwi_p (num)
	  || (n = tree_to_shwi (num)) <= 0
	  || (int) n != n)
	{
	  error_at (loc, "ordered argument needs positive "
			 "constant integer expression");
	  return list;
	}
    }
  c = build_omp_clause (loc, OMP_CLAUSE_ORDERED);
  OMP_CLAUSE_ORDERED_EXPR (c) = num;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 2.5:
   private ( variable-list ) */

static tree
c_parser_omp_clause_private (c_parser *parser, tree list)
{
  /* Delegate to the common parenthesized variable-list parser, which
     prepends one clause per variable onto LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_PRIVATE, list);
  return nl;
}
/* OpenMP 2.5:
   reduction ( reduction-operator : variable-list )

   reduction-operator:
     One of: + * - & ^ | && ||

   OpenMP 3.1:

   reduction-operator:
     One of: + * - & ^ | && || max min

   OpenMP 4.0:

   reduction-operator:
     One of: + * - & ^ | && ||
     identifier

   OpenMP 5.0:
   reduction ( reduction-modifier, reduction-operator : variable-list )
   in_reduction ( reduction-operator : variable-list )
   task_reduction ( reduction-operator : variable-list )

   KIND selects among the three clause codes; IS_OMP distinguishes
   OpenMP (which allows the 5.0 modifiers) from OpenACC callers.  */

static tree
c_parser_omp_clause_reduction (c_parser *parser, enum omp_clause_code kind,
			       bool is_omp, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      bool task = false;
      bool inscan = false;
      enum tree_code code = ERROR_MARK;
      tree reduc_id = NULL_TREE;
      /* OpenMP 5.0 reduction-modifier: "default,", "task," or
	 "inscan," may precede the operator, only on plain reduction.
	 The comma lookahead keeps a plain identifier operator (e.g. a
	 user-defined reduction called "task") unambiguous.  */
      if (kind == OMP_CLAUSE_REDUCTION && is_omp)
	{
	  if (c_parser_next_token_is_keyword (parser, RID_DEFAULT)
	      && c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
	    {
	      c_parser_consume_token (parser);
	      c_parser_consume_token (parser);
	    }
	  else if (c_parser_next_token_is (parser, CPP_NAME)
		   && c_parser_peek_2nd_token (parser)->type == CPP_COMMA)
	    {
	      const char *p
		= IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	      if (strcmp (p, "task") == 0)
		task = true;
	      else if (strcmp (p, "inscan") == 0)
		inscan = true;
	      if (task || inscan)
		{
		  c_parser_consume_token (parser);
		  c_parser_consume_token (parser);
		}
	    }
	}
      /* Map the operator token to a tree code; an identifier selects a
	 user-defined (or min/max) reduction instead.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_PLUS:
	  code = PLUS_EXPR;
	  break;
	case CPP_MULT:
	  code = MULT_EXPR;
	  break;
	case CPP_MINUS:
	  code = MINUS_EXPR;
	  break;
	case CPP_AND:
	  code = BIT_AND_EXPR;
	  break;
	case CPP_XOR:
	  code = BIT_XOR_EXPR;
	  break;
	case CPP_OR:
	  code = BIT_IOR_EXPR;
	  break;
	case CPP_AND_AND:
	  code = TRUTH_ANDIF_EXPR;
	  break;
	case CPP_OR_OR:
	  code = TRUTH_ORIF_EXPR;
	  break;
	case CPP_NAME:
	  {
	    const char *p
	      = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	    if (strcmp (p, "min") == 0)
	      {
		code = MIN_EXPR;
		break;
	      }
	    if (strcmp (p, "max") == 0)
	      {
		code = MAX_EXPR;
		break;
	      }
	    /* Any other identifier names a user-defined reduction;
	       CODE stays ERROR_MARK.  */
	    reduc_id = c_parser_peek_token (parser)->value;
	    break;
	  }
	default:
	  c_parser_error (parser,
			  "expected %<+%>, %<*%>, %<-%>, %<&%>, "
			  "%<^%>, %<|%>, %<&&%>, %<||%> or identifier");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
	  return list;
	}
      c_parser_consume_token (parser);
      reduc_id = c_omp_reduction_id (code, reduc_id);
      if (c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	{
	  tree nl, c;
	  /* NL is LIST with the new clauses prepended, so the loop
	     below visits exactly the clauses just created.  */
	  nl = c_parser_omp_variable_list (parser, clause_loc, kind, list);
	  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	    {
	      tree d = OMP_CLAUSE_DECL (c), type;
	      if (TREE_CODE (d) != TREE_LIST)
		type = TREE_TYPE (d);
	      else
		{
		  /* An array section is encoded as a TREE_LIST chain,
		     one node per dimension; count the nodes, then peel
		     that many pointer/array levels off the base type to
		     reach the element type.  */
		  int cnt = 0;
		  tree t;
		  for (t = d; TREE_CODE (t) == TREE_LIST; t = TREE_CHAIN (t))
		    cnt++;
		  type = TREE_TYPE (t);
		  while (cnt > 0)
		    {
		      if (TREE_CODE (type) != POINTER_TYPE
			  && TREE_CODE (type) != ARRAY_TYPE)
			break;
		      type = TREE_TYPE (type);
		      cnt--;
		    }
		}
	      while (TREE_CODE (type) == ARRAY_TYPE)
		type = TREE_TYPE (type);
	      OMP_CLAUSE_REDUCTION_CODE (c) = code;
	      if (task)
		OMP_CLAUSE_REDUCTION_TASK (c) = 1;
	      else if (inscan)
		OMP_CLAUSE_REDUCTION_INSCAN (c) = 1;
	      /* A user-defined reduction, or a builtin operator applied
		 to a non-arithmetic type, needs a placeholder looked up
		 from the declared reductions.  */
	      if (code == ERROR_MARK
		  || !(INTEGRAL_TYPE_P (type)
		       || TREE_CODE (type) == REAL_TYPE
		       || TREE_CODE (type) == COMPLEX_TYPE))
		OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		  = c_omp_reduction_lookup (reduc_id,
					    TYPE_MAIN_VARIANT (type));
	    }
	  list = nl;
	}
      parens.skip_until_found_close (parser);
    }
  return list;
}
/* OpenMP 2.5:
   schedule ( schedule-kind )
   schedule ( schedule-kind , expression )

   schedule-kind:
     static | dynamic | guided | runtime | auto

   OpenMP 4.5:
   schedule ( schedule-modifier : schedule-kind )
   schedule ( schedule-modifier [ , schedule-modifier ] : schedule-kind , expression )

   schedule-modifier:
     simd
     monotonic
     nonmonotonic

   Parse the optional modifiers, the kind and the optional chunk-size
   expression, and prepend a SCHEDULE clause to LIST.  */

static tree
c_parser_omp_clause_schedule (c_parser *parser, tree list)
{
  tree c, t;
  location_t loc = c_parser_peek_token (parser)->location;
  /* monotonic/nonmonotonic are accumulated in MODIFIERS and OR'd into
     the kind at the end; "simd" is set on the clause directly.  */
  int modifiers = 0, nmodifiers = 0;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  c = build_omp_clause (loc, OMP_CLAUSE_SCHEDULE);
  /* Tracks a trailing comma after a modifier; if it is still set after
     the loop, the modifier list ended without the required ':'.  */
  location_t comma = UNKNOWN_LOCATION;
  while (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      if (strcmp ("simd", p) == 0)
	OMP_CLAUSE_SCHEDULE_SIMD (c) = 1;
      else if (strcmp ("monotonic", p) == 0)
	modifiers |= OMP_CLAUSE_SCHEDULE_MONOTONIC;
      else if (strcmp ("nonmonotonic", p) == 0)
	modifiers |= OMP_CLAUSE_SCHEDULE_NONMONOTONIC;
      else
	/* Not a modifier: must be the schedule kind, handled below.  */
	break;
      comma = UNKNOWN_LOCATION;
      c_parser_consume_token (parser);
      /* At most two modifiers: only the first may be followed by a
	 comma; otherwise a ':' must terminate the modifier list.  */
      if (nmodifiers++ == 0
	  && c_parser_next_token_is (parser, CPP_COMMA))
	{
	  comma = c_parser_peek_token (parser)->location;
	  c_parser_consume_token (parser);
	}
      else
	{
	  c_parser_require (parser, CPP_COLON, "expected %<:%>");
	  break;
	}
    }
  if (comma != UNKNOWN_LOCATION)
    error_at (comma, "expected %<:%>");
  /* monotonic and nonmonotonic are mutually exclusive.  */
  if ((modifiers & (OMP_CLAUSE_SCHEDULE_MONOTONIC
		    | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
      == (OMP_CLAUSE_SCHEDULE_MONOTONIC
	  | OMP_CLAUSE_SCHEDULE_NONMONOTONIC))
    {
      error_at (loc, "both %<monotonic%> and %<nonmonotonic%> modifiers "
		     "specified");
      modifiers = 0;
    }
  /* The schedule kind: dynamic/guided/runtime are plain identifiers;
     static and auto are C keywords and checked separately below.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      tree kind = c_parser_peek_token (parser)->value;
      const char *p = IDENTIFIER_POINTER (kind);
      switch (p[0])
	{
	case 'd':
	  if (strcmp ("dynamic", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC;
	  break;
	case 'g':
	  if (strcmp ("guided", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED;
	  break;
	case 'r':
	  if (strcmp ("runtime", p) != 0)
	    goto invalid_kind;
	  OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME;
	  break;
	default:
	  goto invalid_kind;
	}
    }
  else if (c_parser_next_token_is_keyword (parser, RID_STATIC))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC;
  else if (c_parser_next_token_is_keyword (parser, RID_AUTO))
    OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO;
  else
    goto invalid_kind;
  c_parser_consume_token (parser);
  /* Optional ", chunk_size"; not valid for runtime or auto.  */
  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      location_t here;
      c_parser_consume_token (parser);
      here = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (here, expr, false, true);
      t = expr.value;
      t = c_fully_fold (t, false, NULL);
      if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME)
	error_at (here, "schedule %<runtime%> does not take "
			"a %<chunk_size%> parameter");
      else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO)
	error_at (here,
		  "schedule %<auto%> does not take "
		  "a %<chunk_size%> parameter");
      else if (TREE_CODE (TREE_TYPE (t)) == INTEGER_TYPE)
	{
	  /* Attempt to statically determine when the number isn't
	     positive.  */
	  tree s = fold_build2_loc (loc, LE_EXPR, boolean_type_node, t,
				    build_int_cst (TREE_TYPE (t), 0));
	  protected_set_expr_location (s, loc);
	  if (s == boolean_true_node)
	    {
	      warning_at (loc, 0,
			  "chunk size value must be positive");
	      t = integer_one_node;
	    }
	  OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t;
	}
      else
	c_parser_error (parser, "expected integer expression");
      parens.skip_until_found_close (parser);
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");
  /* Fold the monotonic/nonmonotonic modifier bits into the kind.  */
  OMP_CLAUSE_SCHEDULE_KIND (c)
    = (enum omp_clause_schedule_kind)
      (OMP_CLAUSE_SCHEDULE_KIND (c) | modifiers);
  check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule");
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
 invalid_kind:
  c_parser_error (parser, "invalid schedule kind");
  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, 0);
  return list;
}
/* OpenMP 2.5:
   shared ( variable-list ) */

static tree
c_parser_omp_clause_shared (c_parser *parser, tree list)
{
  /* Delegate to the common parenthesized variable-list parser, which
     prepends one clause per variable onto LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_SHARED, list);
  return nl;
}
/* OpenMP 3.0:
   untied */

static tree
c_parser_omp_clause_untied (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  /* "untied" takes no arguments; just chain a bare clause onto LIST.  */
  /* FIXME: Should we allow duplicates?  */
  check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied");
  location_t here = c_parser_peek_token (parser)->location;
  tree clause = build_omp_clause (here, OMP_CLAUSE_UNTIED);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   inbranch
   notinbranch */

static tree
c_parser_omp_clause_branch (c_parser *parser ATTRIBUTE_UNUSED,
			    enum omp_clause_code code, tree list)
{
  /* Both variants are argument-less; CODE selects which one.  */
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);
  location_t here = c_parser_peek_token (parser)->location;
  tree clause = build_omp_clause (here, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   parallel
   for
   sections
   taskgroup */

static tree
c_parser_omp_clause_cancelkind (c_parser *parser ATTRIBUTE_UNUSED,
				enum omp_clause_code code, tree list)
{
  /* The construct-kind pseudo-clauses take no arguments and, unlike the
     other simple clauses here, are not checked for duplicates.  */
  location_t here = c_parser_peek_token (parser)->location;
  tree clause = build_omp_clause (here, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.5:
   nogroup */

static tree
c_parser_omp_clause_nogroup (c_parser *parser ATTRIBUTE_UNUSED, tree list)
{
  /* "nogroup" takes no arguments; just chain a bare clause onto LIST.  */
  check_no_duplicate_clause (list, OMP_CLAUSE_NOGROUP, "nogroup");
  location_t here = c_parser_peek_token (parser)->location;
  tree clause = build_omp_clause (here, OMP_CLAUSE_NOGROUP);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.5:
   simd
   threads */

static tree
c_parser_omp_clause_orderedkind (c_parser *parser ATTRIBUTE_UNUSED,
				 enum omp_clause_code code, tree list)
{
  /* Both variants are argument-less; CODE selects which one.  */
  check_no_duplicate_clause (list, code, omp_clause_code_name[code]);
  location_t here = c_parser_peek_token (parser)->location;
  tree clause = build_omp_clause (here, code);
  OMP_CLAUSE_CHAIN (clause) = list;
  return clause;
}
/* OpenMP 4.0:
   num_teams ( expression )

   Parse the argument, require it to have integer type, warn (and clamp
   to 1) when it is provably non-positive, and prepend a NUM_TEAMS
   clause to LIST.  On a parse error LIST is returned unchanged.  */

static tree
c_parser_omp_clause_num_teams (c_parser *parser, tree list)
{
  /* Location of the clause keyword, used for the clause node itself.  */
  location_t num_teams_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
      /* The argument must have integral type.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}
      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      /* If "t <= 0" folded to true the value is known non-positive:
	 warn and substitute 1 so downstream code sees a valid value.  */
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<num_teams%> value must be positive");
	  t = integer_one_node;
	}
      check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS, "num_teams");
      c = build_omp_clause (num_teams_loc, OMP_CLAUSE_NUM_TEAMS);
      OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.0:
   thread_limit ( expression )

   Parse the argument, require it to have integer type, warn (and clamp
   to 1) when it is provably non-positive, and prepend a THREAD_LIMIT
   clause to LIST.  On a parse error LIST is returned unchanged.  */

static tree
c_parser_omp_clause_thread_limit (c_parser *parser, tree list)
{
  /* Location of the clause keyword, used for the clause node itself.  */
  location_t num_thread_limit_loc = c_parser_peek_token (parser)->location;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree c, t = expr.value;
      t = c_fully_fold (t, false, NULL);
      parens.skip_until_found_close (parser);
      /* The argument must have integral type.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (t)))
	{
	  c_parser_error (parser, "expected integer expression");
	  return list;
	}
      /* Attempt to statically determine when the number isn't positive.  */
      c = fold_build2_loc (expr_loc, LE_EXPR, boolean_type_node, t,
			   build_int_cst (TREE_TYPE (t), 0));
      protected_set_expr_location (c, expr_loc);
      /* If "t <= 0" folded to true the value is known non-positive:
	 warn and substitute 1 so downstream code sees a valid value.  */
      if (c == boolean_true_node)
	{
	  warning_at (expr_loc, 0, "%<thread_limit%> value must be positive");
	  t = integer_one_node;
	}
      check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT,
				 "thread_limit");
      c = build_omp_clause (num_thread_limit_loc, OMP_CLAUSE_THREAD_LIMIT);
      OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t;
      OMP_CLAUSE_CHAIN (c) = list;
      list = c;
    }
  return list;
}
/* OpenMP 4.0:
   aligned ( variable-list )
   aligned ( variable-list : constant-expression )

   The optional alignment must be a positive constant integer; on an
   invalid alignment a diagnostic is emitted and the clauses keep a
   NULL_TREE alignment.  */

static tree
c_parser_omp_clause_aligned (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* NL is LIST with one ALIGNED clause prepended per variable.  */
  nl = c_parser_omp_variable_list (parser, clause_loc,
				   OMP_CLAUSE_ALIGNED, list);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      tree alignment = expr.value;
      alignment = c_fully_fold (alignment, false, NULL);
      /* tree_int_cst_sgn == 1 means strictly positive.  */
      if (TREE_CODE (alignment) != INTEGER_CST
	  || !INTEGRAL_TYPE_P (TREE_TYPE (alignment))
	  || tree_int_cst_sgn (alignment) != 1)
	{
	  error_at (clause_loc, "%<aligned%> clause alignment expression must "
				"be positive constant integer expression");
	  alignment = NULL_TREE;
	}
      /* Apply the (possibly invalidated) alignment to every clause
	 just created.  */
      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment;
    }
  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 4.0:
linear ( variable-list )
linear ( variable-list : expression )
OpenMP 4.5:
linear ( modifier ( variable-list ) )
linear ( modifier ( variable-list ) : expression ) */
static tree
c_parser_omp_clause_linear (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  tree nl, c, step;
  enum omp_clause_linear_kind kind = OMP_CLAUSE_LINEAR_DEFAULT;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* OpenMP 4.5 modifier syntax: linear (val (list) [: step]).  Only the
     "val" modifier is handled here for C, and a name only counts as a
     modifier when it is immediately followed by its own open paren;
     otherwise it is the first variable of the list.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      if (strcmp ("val", p) == 0)
	kind = OMP_CLAUSE_LINEAR_VAL;
      if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN)
	kind = OMP_CLAUSE_LINEAR_DEFAULT;
      if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
	{
	  /* Consume the modifier name and its open paren.  */
	  c_parser_consume_token (parser);
	  c_parser_consume_token (parser);
	}
    }
  nl = c_parser_omp_variable_list (parser, clause_loc,
				   OMP_CLAUSE_LINEAR, list);
  /* With a modifier there are two nested paren pairs; consume the inner
     close paren here, the outer one at the bottom of the function.  */
  if (kind != OMP_CLAUSE_LINEAR_DEFAULT)
    parens.skip_until_found_close (parser);
  if (c_parser_next_token_is (parser, CPP_COLON))
    {
      /* Optional ": step" expression.  */
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      step = expr.value;
      step = c_fully_fold (step, false, NULL);
      if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
	{
	  error_at (clause_loc, "%<linear%> clause step expression must "
				"be integral");
	  /* Error recovery: fall back to the default step of 1.  */
	  step = integer_one_node;
	}
    }
  else
    /* The step defaults to 1 when not given.  */
    step = integer_one_node;
  /* Apply the step and kind to every clause parsed above.  */
  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    {
      OMP_CLAUSE_LINEAR_STEP (c) = step;
      OMP_CLAUSE_LINEAR_KIND (c) = kind;
    }
  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 5.0:
nontemporal ( variable-list ) */
static tree
c_parser_omp_clause_nontemporal (c_parser *parser, tree list)
{
  /* A plain parenthesized variable list; the helper consumes the
     parens and chains the new clauses in front of LIST.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_NONTEMPORAL,
					  list);
  return nl;
}
/* OpenMP 4.0:
safelen ( constant-expression ) */
static tree
c_parser_omp_clause_safelen (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree len = c_fully_fold (expr.value, false, NULL);

  /* The argument must be a positive constant integer.  */
  if (TREE_CODE (len) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (len))
      || tree_int_cst_sgn (len) != 1)
    {
      error_at (loc, "%<safelen%> clause expression must "
		     "be positive constant integer expression");
      len = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (len == NULL_TREE || len == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen");

  tree c = build_omp_clause (loc, OMP_CLAUSE_SAFELEN);
  OMP_CLAUSE_SAFELEN_EXPR (c) = len;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
simdlen ( constant-expression ) */
static tree
c_parser_omp_clause_simdlen (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree len = c_fully_fold (expr.value, false, NULL);

  /* The argument must be a positive constant integer.  */
  if (TREE_CODE (len) != INTEGER_CST
      || !INTEGRAL_TYPE_P (TREE_TYPE (len))
      || tree_int_cst_sgn (len) != 1)
    {
      error_at (loc, "%<simdlen%> clause expression must "
		     "be positive constant integer expression");
      len = NULL_TREE;
    }

  parens.skip_until_found_close (parser);
  if (len == NULL_TREE || len == error_mark_node)
    return list;

  check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen");

  tree c = build_omp_clause (loc, OMP_CLAUSE_SIMDLEN);
  OMP_CLAUSE_SIMDLEN_EXPR (c) = len;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.5:
vec:
identifier [+/- integer]
vec , identifier [+/- integer]
*/
static tree
c_parser_omp_clause_depend_sink (c_parser *parser, location_t clause_loc,
				 tree list)
{
  /* VEC accumulates one TREE_LIST node per "identifier [+/- integer]"
     item: the addend is the TREE_PURPOSE, the variable the TREE_VALUE.
     It is built in reverse and flipped with nreverse at the end.  */
  tree vec = NULL;
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    {
      c_parser_error (parser, "expected identifier");
      return list;
    }
  while (c_parser_next_token_is (parser, CPP_NAME)
	 && c_parser_peek_token (parser)->id_kind == C_ID_ID)
    {
      tree t = lookup_name (c_parser_peek_token (parser)->value);
      tree addend = NULL;
      if (t == NULL_TREE)
	{
	  undeclared_variable (c_parser_peek_token (parser)->location,
			       c_parser_peek_token (parser)->value);
	  /* Keep parsing the rest of the vector so further errors are
	     still diagnosed; this item is skipped below.  */
	  t = error_mark_node;
	}
      c_parser_consume_token (parser);
      bool neg = false;
      if (c_parser_next_token_is (parser, CPP_MINUS))
	neg = true;
      else if (!c_parser_next_token_is (parser, CPP_PLUS))
	{
	  /* No "+" or "-" follows: the addend defaults to zero and the
	     sign-token consumption below is bypassed.  */
	  addend = integer_zero_node;
	  neg = false;
	  goto add_to_vector;
	}
      /* Consume the "+" or "-" token.  */
      c_parser_consume_token (parser);
      if (c_parser_next_token_is_not (parser, CPP_NUMBER))
	{
	  c_parser_error (parser, "expected integer");
	  return list;
	}
      addend = c_parser_peek_token (parser)->value;
      if (TREE_CODE (addend) != INTEGER_CST)
	{
	  /* E.g. a floating-point literal is CPP_NUMBER too.  */
	  c_parser_error (parser, "expected integer");
	  return list;
	}
      c_parser_consume_token (parser);
    add_to_vector:
      if (t != error_mark_node)
	{
	  vec = tree_cons (addend, t, vec);
	  if (neg)
	    OMP_CLAUSE_DEPEND_SINK_NEGATIVE (vec) = 1;
	}
      if (c_parser_next_token_is_not (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  if (vec == NULL_TREE)
    return list;
  /* Wrap the whole vector in a single depend(sink:) clause.  */
  tree u = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DEPEND_KIND (u) = OMP_CLAUSE_DEPEND_SINK;
  OMP_CLAUSE_DECL (u) = nreverse (vec);
  OMP_CLAUSE_CHAIN (u) = list;
  return u;
}
/* OpenMP 5.0:
iterators ( iterators-definition )
iterators-definition:
iterator-specifier
iterator-specifier , iterators-definition
iterator-specifier:
identifier = range-specification
iterator-type identifier = range-specification
range-specification:
begin : end
begin : end : step */
static tree
c_parser_omp_iterators (c_parser *parser)
{
  tree ret = NULL_TREE, *last = &ret;
  c_parser_consume_token (parser);
  /* The iterator variables get their own scope.  NOTE: the caller is
     responsible for popping it again (and storing the resulting BLOCK
     into element 5 of each TREE_VEC) — see c_parser_omp_clause_depend.  */
  push_scope ();
  matching_parens parens;
  if (!parens.require_open (parser))
    return error_mark_node;
  do
    {
      /* Optional iterator-type; defaults to int.  */
      tree iter_type = NULL_TREE, type_expr = NULL_TREE;
      if (c_parser_next_tokens_start_typename (parser, cla_prefer_id))
	{
	  struct c_type_name *type = c_parser_type_name (parser);
	  if (type != NULL)
	    iter_type = groktypename (type, &type_expr, NULL);
	}
      if (iter_type == NULL_TREE)
	iter_type = integer_type_node;
      location_t loc = c_parser_peek_token (parser)->location;
      if (!c_parser_next_token_is (parser, CPP_NAME))
	{
	  c_parser_error (parser, "expected identifier");
	  break;
	}
      tree id = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	break;
      /* range-specification: begin : end [: step].  */
      location_t eloc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (eloc, expr, true, false);
      tree begin = expr.value;
      if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
	break;
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (eloc, expr, true, false);
      tree end = expr.value;
      /* The step is optional and defaults to 1.  */
      tree step = integer_one_node;
      if (c_parser_next_token_is (parser, CPP_COLON))
	{
	  c_parser_consume_token (parser);
	  eloc = c_parser_peek_token (parser)->location;
	  expr = c_parser_expr_no_commas (parser, NULL);
	  expr = convert_lvalue_to_rvalue (eloc, expr, true, false);
	  step = expr.value;
	}
      /* Declare the iterator variable inside the scope pushed above.  */
      tree iter_var = build_decl (loc, VAR_DECL, id, iter_type);
      DECL_ARTIFICIAL (iter_var) = 1;
      DECL_CONTEXT (iter_var) = current_function_decl;
      pushdecl (iter_var);
      /* Each iterator-specifier becomes a 6-element TREE_VEC holding
	 var, begin, end and step; elements 4/5 are filled in later
	 (element 5 gets the scope BLOCK from the caller).  */
      *last = make_tree_vec (6);
      TREE_VEC_ELT (*last, 0) = iter_var;
      TREE_VEC_ELT (*last, 1) = begin;
      TREE_VEC_ELT (*last, 2) = end;
      TREE_VEC_ELT (*last, 3) = step;
      last = &TREE_CHAIN (*last);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	{
	  c_parser_consume_token (parser);
	  continue;
	}
      break;
    }
  while (1);
  parens.skip_until_found_close (parser);
  /* An empty definition is an error.  */
  return ret ? ret : error_mark_node;
}
/* OpenMP 4.0:
depend ( depend-kind: variable-list )
depend-kind:
in | out | inout
OpenMP 4.5:
depend ( source )
depend ( sink : vec )
OpenMP 5.0:
depend ( depend-modifier , depend-kind: variable-list )
depend-kind:
in | out | inout | mutexinoutset | depobj
depend-modifier:
iterator ( iterators-definition ) */
static tree
c_parser_omp_clause_depend (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_LAST;
  tree nl, c, iterators = NULL_TREE;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  /* Parse an optional "iterator (...)" modifier followed by the
     depend-kind.  c_parser_omp_iterators pushes a scope which must be
     popped again on every path below.  */
  do
    {
      if (c_parser_next_token_is_not (parser, CPP_NAME))
	goto invalid_kind;
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("iterator", p) == 0 && iterators == NULL_TREE)
	{
	  iterators = c_parser_omp_iterators (parser);
	  c_parser_require (parser, CPP_COMMA, "expected %<,%>");
	  continue;
	}
      if (strcmp ("in", p) == 0)
	kind = OMP_CLAUSE_DEPEND_IN;
      else if (strcmp ("inout", p) == 0)
	kind = OMP_CLAUSE_DEPEND_INOUT;
      else if (strcmp ("mutexinoutset", p) == 0)
	kind = OMP_CLAUSE_DEPEND_MUTEXINOUTSET;
      else if (strcmp ("out", p) == 0)
	kind = OMP_CLAUSE_DEPEND_OUT;
      else if (strcmp ("depobj", p) == 0)
	kind = OMP_CLAUSE_DEPEND_DEPOBJ;
      else if (strcmp ("sink", p) == 0)
	kind = OMP_CLAUSE_DEPEND_SINK;
      else if (strcmp ("source", p) == 0)
	kind = OMP_CLAUSE_DEPEND_SOURCE;
      else
	goto invalid_kind;
      break;
    }
  while (1);
  /* Consume the depend-kind name.  */
  c_parser_consume_token (parser);
  if (iterators
      && (kind == OMP_CLAUSE_DEPEND_SOURCE || kind == OMP_CLAUSE_DEPEND_SINK))
    {
      /* The iterator modifier is only valid with the plain kinds;
	 discard it and pop the scope it pushed.  */
      pop_scope ();
      error_at (clause_loc, "%<iterator%> modifier incompatible with %qs",
		kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
      iterators = NULL_TREE;
    }
  if (kind == OMP_CLAUSE_DEPEND_SOURCE)
    {
      /* depend(source) takes no variable list.  */
      c = build_omp_clause (clause_loc, OMP_CLAUSE_DEPEND);
      OMP_CLAUSE_DEPEND_KIND (c) = kind;
      OMP_CLAUSE_DECL (c) = NULL_TREE;
      OMP_CLAUSE_CHAIN (c) = list;
      parens.skip_until_found_close (parser);
      return c;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto resync_fail;
  if (kind == OMP_CLAUSE_DEPEND_SINK)
    nl = c_parser_omp_clause_depend_sink (parser, clause_loc, list);
  else
    {
      nl = c_parser_omp_variable_list (parser, clause_loc,
				       OMP_CLAUSE_DEPEND, list);
      if (iterators)
	{
	  /* Pop the iterator scope and stash its BLOCK in element 5 of
	     the iterator TREE_VEC; each depend decl is then wrapped in
	     a TREE_LIST that carries the iterators.  */
	  tree block = pop_scope ();
	  if (iterators == error_mark_node)
	    iterators = NULL_TREE;
	  else
	    TREE_VEC_ELT (iterators, 5) = block;
	}
      for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
	{
	  OMP_CLAUSE_DEPEND_KIND (c) = kind;
	  if (iterators)
	    OMP_CLAUSE_DECL (c)
	      = build_tree_list (iterators, OMP_CLAUSE_DECL (c));
	}
    }
  parens.skip_until_found_close (parser);
  return nl;
 invalid_kind:
  c_parser_error (parser, "invalid depend kind");
 resync_fail:
  parens.skip_until_found_close (parser);
  /* Balance the scope pushed by c_parser_omp_iterators.  */
  if (iterators)
    pop_scope ();
  return list;
}
/* OpenMP 4.0:
map ( map-kind: variable-list )
map ( variable-list )
map-kind:
alloc | to | from | tofrom
OpenMP 4.5:
map-kind:
alloc | to | from | tofrom | release | delete
map ( always [,] map-kind: variable-list ) */
static tree
c_parser_omp_clause_map (c_parser *parser, tree list)
{
  location_t clause_loc = c_parser_peek_token (parser)->location;
  enum gomp_map_kind kind = GOMP_MAP_TOFROM;
  /* ALWAYS: 0 = no "always" modifier; 2 = "always ," (both tokens
     consumed); 1 = "always" directly followed by a map-kind name (only
     the "always" token consumed).  */
  int always = 0;
  enum c_id_kind always_id_kind = C_ID_NONE;
  location_t always_loc = UNKNOWN_LOCATION;
  tree always_id = NULL_TREE;
  tree nl, c;
  matching_parens parens;
  if (!parens.require_open (parser))
    return list;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      c_token *tok = c_parser_peek_token (parser);
      const char *p = IDENTIFIER_POINTER (tok->value);
      /* Remember the token: "always" may turn out to be an ordinary
	 variable name rather than the modifier.  */
      always_id_kind = tok->id_kind;
      always_loc = tok->location;
      always_id = tok->value;
      if (strcmp ("always", p) == 0)
	{
	  c_token *sectok = c_parser_peek_2nd_token (parser);
	  if (sectok->type == CPP_COMMA)
	    {
	      c_parser_consume_token (parser);
	      c_parser_consume_token (parser);
	      always = 2;
	    }
	  else if (sectok->type == CPP_NAME)
	    {
	      /* "always" followed by one of the known map-kind names is
		 treated as the modifier.  */
	      p = IDENTIFIER_POINTER (sectok->value);
	      if (strcmp ("alloc", p) == 0
		  || strcmp ("to", p) == 0
		  || strcmp ("from", p) == 0
		  || strcmp ("tofrom", p) == 0
		  || strcmp ("release", p) == 0
		  || strcmp ("delete", p) == 0)
		{
		  c_parser_consume_token (parser);
		  always = 1;
		}
	    }
	}
    }
  /* "name :" at this point is a map-kind specifier.  */
  if (c_parser_next_token_is (parser, CPP_NAME)
      && c_parser_peek_2nd_token (parser)->type == CPP_COLON)
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp ("alloc", p) == 0)
	kind = GOMP_MAP_ALLOC;
      else if (strcmp ("to", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_TO : GOMP_MAP_TO;
      else if (strcmp ("from", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_FROM : GOMP_MAP_FROM;
      else if (strcmp ("tofrom", p) == 0)
	kind = always ? GOMP_MAP_ALWAYS_TOFROM : GOMP_MAP_TOFROM;
      else if (strcmp ("release", p) == 0)
	kind = GOMP_MAP_RELEASE;
      else if (strcmp ("delete", p) == 0)
	kind = GOMP_MAP_DELETE;
      else
	{
	  c_parser_error (parser, "invalid map kind");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  return list;
	}
      /* Consume the map-kind name and the colon.  */
      c_parser_consume_token (parser);
      c_parser_consume_token (parser);
    }
  else if (always)
    {
      /* No "map-kind :" followed after all, so "always" was really the
	 first variable of the list; map it with the default kind.  */
      if (always_id_kind != C_ID_ID)
	{
	  c_parser_error (parser, "expected identifier");
	  parens.skip_until_found_close (parser);
	  return list;
	}
      tree t = lookup_name (always_id);
      if (t == NULL_TREE)
	{
	  undeclared_variable (always_loc, always_id);
	  t = error_mark_node;
	}
      if (t != error_mark_node)
	{
	  tree u = build_omp_clause (clause_loc, OMP_CLAUSE_MAP);
	  OMP_CLAUSE_DECL (u) = t;
	  OMP_CLAUSE_CHAIN (u) = list;
	  OMP_CLAUSE_SET_MAP_KIND (u, kind);
	  list = u;
	}
      if (always == 1)
	{
	  /* "always" was consumed without a following comma, so the
	     remaining tokens cannot continue the variable list; skip
	     ahead to the close paren.  */
	  parens.skip_until_found_close (parser);
	  return list;
	}
    }
  nl = c_parser_omp_variable_list (parser, clause_loc, OMP_CLAUSE_MAP, list);
  for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c))
    OMP_CLAUSE_SET_MAP_KIND (c, kind);
  parens.skip_until_found_close (parser);
  return nl;
}
/* OpenMP 4.0:
device ( expression ) */
static tree
c_parser_omp_clause_device (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  location_t expr_loc = c_parser_peek_token (parser)->location;
  c_expr expr = c_parser_expr_no_commas (parser, NULL);
  expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
  tree dev = c_fully_fold (expr.value, false, NULL);
  parens.skip_until_found_close (parser);

  /* The device id must have integral type.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (dev)))
    {
      c_parser_error (parser, "expected integer expression");
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE, "device");

  tree c = build_omp_clause (loc, OMP_CLAUSE_DEVICE);
  OMP_CLAUSE_DEVICE_ID (c) = dev;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
dist_schedule ( static )
dist_schedule ( static , expression ) */
static tree
c_parser_omp_clause_dist_schedule (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree chunk = NULL_TREE;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  /* "static" is the only schedule kind allowed on dist_schedule.  */
  if (!c_parser_next_token_is_keyword (parser, RID_STATIC))
    {
      c_parser_error (parser, "invalid dist_schedule kind");
      c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				 "expected %<)%>");
      return list;
    }
  c_parser_consume_token (parser);

  if (c_parser_next_token_is (parser, CPP_COMMA))
    {
      /* Optional chunk-size expression after the comma.  */
      c_parser_consume_token (parser);
      location_t expr_loc = c_parser_peek_token (parser)->location;
      c_expr expr = c_parser_expr_no_commas (parser, NULL);
      expr = convert_lvalue_to_rvalue (expr_loc, expr, false, true);
      chunk = c_fully_fold (expr.value, false, NULL);
      parens.skip_until_found_close (parser);
    }
  else
    c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
			       "expected %<,%> or %<)%>");

  /* Duplicates are diagnosed with a warning rather than an error
     (check_no_duplicate_clause is deliberately not used here).  */
  if (omp_find_clause (list, OMP_CLAUSE_DIST_SCHEDULE))
    warning_at (loc, 0, "too many %qs clauses", "dist_schedule");
  if (chunk == error_mark_node)
    return list;

  tree c = build_omp_clause (loc, OMP_CLAUSE_DIST_SCHEDULE);
  OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = chunk;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
proc_bind ( proc-bind-kind )
proc-bind-kind:
master | close | spread */
static tree
c_parser_omp_clause_proc_bind (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  enum omp_clause_proc_bind_kind kind = OMP_CLAUSE_PROC_BIND_MASTER;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  /* The argument is one of the identifiers master/close/spread.  */
  bool recognized = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      recognized = true;
      if (strcmp ("master", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_MASTER;
      else if (strcmp ("close", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_CLOSE;
      else if (strcmp ("spread", p) == 0)
	kind = OMP_CLAUSE_PROC_BIND_SPREAD;
      else
	recognized = false;
    }
  if (!recognized)
    {
      c_parser_error (parser, "invalid proc_bind kind");
      parens.skip_until_found_close (parser);
      return list;
    }

  check_no_duplicate_clause (list, OMP_CLAUSE_PROC_BIND, "proc_bind");
  c_parser_consume_token (parser);
  parens.skip_until_found_close (parser);

  tree c = build_omp_clause (loc, OMP_CLAUSE_PROC_BIND);
  OMP_CLAUSE_PROC_BIND_KIND (c) = kind;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 5.0:
device_type ( host | nohost | any ) */
static tree
c_parser_omp_clause_device_type (c_parser *parser, tree list)
{
  location_t loc = c_parser_peek_token (parser)->location;
  enum omp_clause_device_type_kind kind = OMP_CLAUSE_DEVICE_TYPE_ANY;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  /* The argument is one of the identifiers host/nohost/any.  */
  bool recognized = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      recognized = true;
      if (strcmp ("host", p) == 0)
	kind = OMP_CLAUSE_DEVICE_TYPE_HOST;
      else if (strcmp ("nohost", p) == 0)
	kind = OMP_CLAUSE_DEVICE_TYPE_NOHOST;
      else if (strcmp ("any", p) == 0)
	kind = OMP_CLAUSE_DEVICE_TYPE_ANY;
      else
	recognized = false;
    }
  if (!recognized)
    {
      c_parser_error (parser, "expected %<host%>, %<nohost%> or %<any%>");
      parens.skip_until_found_close (parser);
      return list;
    }

  /* Duplicate device_type clauses are deliberately not diagnosed here
     (the check_no_duplicate_clause call is intentionally omitted).  */
  c_parser_consume_token (parser);
  parens.skip_until_found_close (parser);

  tree c = build_omp_clause (loc, OMP_CLAUSE_DEVICE_TYPE);
  OMP_CLAUSE_DEVICE_TYPE_KIND (c) = kind;
  OMP_CLAUSE_CHAIN (c) = list;
  return c;
}
/* OpenMP 4.0:
to ( variable-list ) */
static tree
c_parser_omp_clause_to (c_parser *parser, tree list)
{
  /* A plain parenthesized variable list.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO, list);
  return nl;
}
/* OpenMP 4.0:
from ( variable-list ) */
static tree
c_parser_omp_clause_from (c_parser *parser, tree list)
{
  /* A plain parenthesized variable list.  */
  tree nl = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_FROM, list);
  return nl;
}
/* OpenMP 4.0:
uniform ( variable-list ) */
static tree
c_parser_omp_clause_uniform (c_parser *parser, tree list)
{
  /* Location of the clause, for diagnostics.  */
  location_t loc = c_parser_peek_token (parser)->location;
  matching_parens parens;

  if (!parens.require_open (parser))
    return list;

  list = c_parser_omp_variable_list (parser, loc, OMP_CLAUSE_UNIFORM, list);
  parens.skip_until_found_close (parser);
  return list;
}
/* Parse all OpenACC clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found. */
static tree
c_parser_oacc_all_clauses (c_parser *parser, omp_clause_mask mask,
			   const char *where, bool finish_p = true)
{
  tree clauses = NULL;
  bool first = true;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so a clause not allowed by MASK can be
	 dropped again after it has been parsed.  */
      tree prev = clauses;
      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);
      /* Dispatch to the per-clause parser; each case also records the
	 clause name for the diagnostics below.  */
      switch (c_kind)
	{
	case PRAGMA_OACC_CLAUSE_ASYNC:
	  clauses = c_parser_oacc_clause_async (parser, clauses);
	  c_name = "async";
	  break;
	case PRAGMA_OACC_CLAUSE_AUTO:
	  clauses = c_parser_oacc_simple_clause (here, OMP_CLAUSE_AUTO,
						 clauses);
	  c_name = "auto";
	  break;
	case PRAGMA_OACC_CLAUSE_ATTACH:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "attach";
	  break;
	case PRAGMA_OACC_CLAUSE_COLLAPSE:
	  clauses = c_parser_omp_clause_collapse (parser, clauses);
	  c_name = "collapse";
	  break;
	case PRAGMA_OACC_CLAUSE_COPY:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copy";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYIN:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OACC_CLAUSE_COPYOUT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "copyout";
	  break;
	case PRAGMA_OACC_CLAUSE_CREATE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "create";
	  break;
	case PRAGMA_OACC_CLAUSE_DELETE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "delete";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  /* "default" is shared with OpenMP; is_oacc == true here.  */
	  clauses = c_parser_omp_clause_default (parser, clauses, true);
	  c_name = "default";
	  break;
	case PRAGMA_OACC_CLAUSE_DETACH:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "detach";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "device";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICEPTR:
	  clauses = c_parser_oacc_data_clause_deviceptr (parser, clauses);
	  c_name = "deviceptr";
	  break;
	case PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "device_resident";
	  break;
	case PRAGMA_OACC_CLAUSE_FINALIZE:
	  clauses = c_parser_oacc_simple_clause (here, OMP_CLAUSE_FINALIZE,
						 clauses);
	  c_name = "finalize";
	  break;
	case PRAGMA_OACC_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OACC_CLAUSE_GANG:
	  c_name = "gang";
	  clauses = c_parser_oacc_shape_clause (parser, here, OMP_CLAUSE_GANG,
						c_name, clauses);
	  break;
	case PRAGMA_OACC_CLAUSE_HOST:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "host";
	  break;
	case PRAGMA_OACC_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses, false);
	  c_name = "if";
	  break;
	case PRAGMA_OACC_CLAUSE_IF_PRESENT:
	  clauses = c_parser_oacc_simple_clause (here, OMP_CLAUSE_IF_PRESENT,
						 clauses);
	  c_name = "if_present";
	  break;
	case PRAGMA_OACC_CLAUSE_INDEPENDENT:
	  clauses = c_parser_oacc_simple_clause (here, OMP_CLAUSE_INDEPENDENT,
						 clauses);
	  c_name = "independent";
	  break;
	case PRAGMA_OACC_CLAUSE_LINK:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "link";
	  break;
	case PRAGMA_OACC_CLAUSE_NO_CREATE:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "no_create";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_GANGS:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_NUM_GANGS,
						     clauses);
	  c_name = "num_gangs";
	  break;
	case PRAGMA_OACC_CLAUSE_NUM_WORKERS:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_NUM_WORKERS,
						     clauses);
	  c_name = "num_workers";
	  break;
	case PRAGMA_OACC_CLAUSE_PRESENT:
	  clauses = c_parser_oacc_data_clause (parser, c_kind, clauses);
	  c_name = "present";
	  break;
	case PRAGMA_OACC_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OACC_CLAUSE_REDUCTION:
	  clauses
	    = c_parser_omp_clause_reduction (parser, OMP_CLAUSE_REDUCTION,
					     false, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OACC_CLAUSE_SEQ:
	  clauses = c_parser_oacc_simple_clause (here, OMP_CLAUSE_SEQ,
						 clauses);
	  c_name = "seq";
	  break;
	case PRAGMA_OACC_CLAUSE_TILE:
	  clauses = c_parser_oacc_clause_tile (parser, clauses);
	  c_name = "tile";
	  break;
	case PRAGMA_OACC_CLAUSE_USE_DEVICE:
	  clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
	  c_name = "use_device";
	  break;
	case PRAGMA_OACC_CLAUSE_VECTOR:
	  c_name = "vector";
	  clauses = c_parser_oacc_shape_clause (parser, here, OMP_CLAUSE_VECTOR,
						c_name, clauses);
	  break;
	case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH:
	  clauses = c_parser_oacc_single_int_clause (parser,
						     OMP_CLAUSE_VECTOR_LENGTH,
						     clauses);
	  c_name = "vector_length";
	  break;
	case PRAGMA_OACC_CLAUSE_WAIT:
	  clauses = c_parser_oacc_clause_wait (parser, clauses);
	  c_name = "wait";
	  break;
	case PRAGMA_OACC_CLAUSE_WORKER:
	  c_name = "worker";
	  clauses = c_parser_oacc_shape_clause (parser, here, OMP_CLAUSE_WORKER,
						c_name, clauses);
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma acc%> clause");
	  goto saw_error;
	}
      first = false;
      /* Clauses not allowed by this directive's MASK were still parsed
	 (to keep the token stream consistent) and are discarded here.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  c_parser_skip_to_pragma_eol (parser);
  if (finish_p)
    return c_finish_omp_clauses (clauses, C_ORT_ACC);
  return clauses;
}
/* Parse all OpenMP clauses. The set clauses allowed by the directive
is a bitmask in MASK. Return the list of clauses found.
FINISH_P set if c_finish_omp_clauses should be called.
NESTED non-zero if clauses should be terminated by closing paren instead
of end of pragma. If it is 2, additionally commas are required in between
the clauses. */
static tree
c_parser_omp_all_clauses (c_parser *parser, omp_clause_mask mask,
			  const char *where, bool finish_p = true,
			  int nested = 0)
{
  tree clauses = NULL;
  bool first = true;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      location_t here;
      pragma_omp_clause c_kind;
      const char *c_name;
      /* Remember the list head so a clause not allowed by MASK can be
	 dropped again after it has been parsed.  */
      tree prev = clauses;
      /* In nested mode the clause list ends at a close paren instead of
	 the end of the pragma line.  */
      if (nested && c_parser_next_token_is (parser, CPP_CLOSE_PAREN))
	break;
      if (!first)
	{
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else if (nested == 2)
	    /* Inside a declare-variant "simd" trait the separating
	       commas are mandatory.  */
	    error_at (c_parser_peek_token (parser)->location,
		      "clauses in %<simd%> trait should be separated "
		      "by %<,%>");
	}
      here = c_parser_peek_token (parser)->location;
      c_kind = c_parser_omp_clause_name (parser);
      /* Dispatch to the per-clause parser; each case also records the
	 clause name for the diagnostics below.  */
      switch (c_kind)
	{
	case PRAGMA_OMP_CLAUSE_BIND:
	  clauses = c_parser_omp_clause_bind (parser, clauses);
	  c_name = "bind";
	  break;
	case PRAGMA_OMP_CLAUSE_COLLAPSE:
	  clauses = c_parser_omp_clause_collapse (parser, clauses);
	  c_name = "collapse";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYIN:
	  clauses = c_parser_omp_clause_copyin (parser, clauses);
	  c_name = "copyin";
	  break;
	case PRAGMA_OMP_CLAUSE_COPYPRIVATE:
	  clauses = c_parser_omp_clause_copyprivate (parser, clauses);
	  c_name = "copyprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULT:
	  clauses = c_parser_omp_clause_default (parser, clauses, false);
	  c_name = "default";
	  break;
	case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE:
	  clauses = c_parser_omp_clause_firstprivate (parser, clauses);
	  c_name = "firstprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_FINAL:
	  clauses = c_parser_omp_clause_final (parser, clauses);
	  c_name = "final";
	  break;
	case PRAGMA_OMP_CLAUSE_GRAINSIZE:
	  clauses = c_parser_omp_clause_grainsize (parser, clauses);
	  c_name = "grainsize";
	  break;
	case PRAGMA_OMP_CLAUSE_HINT:
	  clauses = c_parser_omp_clause_hint (parser, clauses);
	  c_name = "hint";
	  break;
	case PRAGMA_OMP_CLAUSE_DEFAULTMAP:
	  clauses = c_parser_omp_clause_defaultmap (parser, clauses);
	  c_name = "defaultmap";
	  break;
	case PRAGMA_OMP_CLAUSE_IF:
	  clauses = c_parser_omp_clause_if (parser, clauses, true);
	  c_name = "if";
	  break;
	case PRAGMA_OMP_CLAUSE_IN_REDUCTION:
	  clauses
	    = c_parser_omp_clause_reduction (parser, OMP_CLAUSE_IN_REDUCTION,
					     true, clauses);
	  c_name = "in_reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_LASTPRIVATE:
	  clauses = c_parser_omp_clause_lastprivate (parser, clauses);
	  c_name = "lastprivate";
	  break;
	case PRAGMA_OMP_CLAUSE_MERGEABLE:
	  clauses = c_parser_omp_clause_mergeable (parser, clauses);
	  c_name = "mergeable";
	  break;
	case PRAGMA_OMP_CLAUSE_NOWAIT:
	  clauses = c_parser_omp_clause_nowait (parser, clauses);
	  c_name = "nowait";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_TASKS:
	  clauses = c_parser_omp_clause_num_tasks (parser, clauses);
	  c_name = "num_tasks";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_THREADS:
	  clauses = c_parser_omp_clause_num_threads (parser, clauses);
	  c_name = "num_threads";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDER:
	  clauses = c_parser_omp_clause_order (parser, clauses);
	  c_name = "order";
	  break;
	case PRAGMA_OMP_CLAUSE_ORDERED:
	  clauses = c_parser_omp_clause_ordered (parser, clauses);
	  c_name = "ordered";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIORITY:
	  clauses = c_parser_omp_clause_priority (parser, clauses);
	  c_name = "priority";
	  break;
	case PRAGMA_OMP_CLAUSE_PRIVATE:
	  clauses = c_parser_omp_clause_private (parser, clauses);
	  c_name = "private";
	  break;
	case PRAGMA_OMP_CLAUSE_REDUCTION:
	  clauses
	    = c_parser_omp_clause_reduction (parser, OMP_CLAUSE_REDUCTION,
					     true, clauses);
	  c_name = "reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_SCHEDULE:
	  clauses = c_parser_omp_clause_schedule (parser, clauses);
	  c_name = "schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_SHARED:
	  clauses = c_parser_omp_clause_shared (parser, clauses);
	  c_name = "shared";
	  break;
	case PRAGMA_OMP_CLAUSE_TASK_REDUCTION:
	  clauses
	    = c_parser_omp_clause_reduction (parser, OMP_CLAUSE_TASK_REDUCTION,
					     true, clauses);
	  c_name = "task_reduction";
	  break;
	case PRAGMA_OMP_CLAUSE_UNTIED:
	  clauses = c_parser_omp_clause_untied (parser, clauses);
	  c_name = "untied";
	  break;
	case PRAGMA_OMP_CLAUSE_INBRANCH:
	  clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH,
						clauses);
	  c_name = "inbranch";
	  break;
	case PRAGMA_OMP_CLAUSE_NONTEMPORAL:
	  clauses = c_parser_omp_clause_nontemporal (parser, clauses);
	  c_name = "nontemporal";
	  break;
	case PRAGMA_OMP_CLAUSE_NOTINBRANCH:
	  clauses = c_parser_omp_clause_branch (parser, OMP_CLAUSE_NOTINBRANCH,
						clauses);
	  c_name = "notinbranch";
	  break;
	case PRAGMA_OMP_CLAUSE_PARALLEL:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL,
					      clauses);
	  c_name = "parallel";
	  if (!first)
	    {
	      /* Cancellation-kind clauses must come first; the other
		 cancel kinds below jump back here to share the error.  */
	     clause_not_first:
	      error_at (here, "%qs must be the first clause of %qs",
			c_name, where);
	      clauses = prev;
	    }
	  break;
	case PRAGMA_OMP_CLAUSE_FOR:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR,
					      clauses);
	  c_name = "for";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_SECTIONS:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS,
					      clauses);
	  c_name = "sections";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_TASKGROUP:
	  clauses
	    = c_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP,
					      clauses);
	  c_name = "taskgroup";
	  if (!first)
	    goto clause_not_first;
	  break;
	case PRAGMA_OMP_CLAUSE_LINK:
	  clauses
	    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_LINK, clauses);
	  c_name = "link";
	  break;
	case PRAGMA_OMP_CLAUSE_TO:
	  /* On "declare target" (where "link" is also allowed) "to"
	     means TO_DECLARE rather than the target-update "to".  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK)) != 0)
	    clauses
	      = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE,
					      clauses);
	  else
	    clauses = c_parser_omp_clause_to (parser, clauses);
	  c_name = "to";
	  break;
	case PRAGMA_OMP_CLAUSE_FROM:
	  clauses = c_parser_omp_clause_from (parser, clauses);
	  c_name = "from";
	  break;
	case PRAGMA_OMP_CLAUSE_UNIFORM:
	  clauses = c_parser_omp_clause_uniform (parser, clauses);
	  c_name = "uniform";
	  break;
	case PRAGMA_OMP_CLAUSE_NUM_TEAMS:
	  clauses = c_parser_omp_clause_num_teams (parser, clauses);
	  c_name = "num_teams";
	  break;
	case PRAGMA_OMP_CLAUSE_THREAD_LIMIT:
	  clauses = c_parser_omp_clause_thread_limit (parser, clauses);
	  c_name = "thread_limit";
	  break;
	case PRAGMA_OMP_CLAUSE_ALIGNED:
	  clauses = c_parser_omp_clause_aligned (parser, clauses);
	  c_name = "aligned";
	  break;
	case PRAGMA_OMP_CLAUSE_LINEAR:
	  clauses = c_parser_omp_clause_linear (parser, clauses);
	  c_name = "linear";
	  break;
	case PRAGMA_OMP_CLAUSE_DEPEND:
	  clauses = c_parser_omp_clause_depend (parser, clauses);
	  c_name = "depend";
	  break;
	case PRAGMA_OMP_CLAUSE_MAP:
	  clauses = c_parser_omp_clause_map (parser, clauses);
	  c_name = "map";
	  break;
	case PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR:
	  clauses = c_parser_omp_clause_use_device_ptr (parser, clauses);
	  c_name = "use_device_ptr";
	  break;
	case PRAGMA_OMP_CLAUSE_USE_DEVICE_ADDR:
	  clauses = c_parser_omp_clause_use_device_addr (parser, clauses);
	  c_name = "use_device_addr";
	  break;
	case PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR:
	  clauses = c_parser_omp_clause_is_device_ptr (parser, clauses);
	  c_name = "is_device_ptr";
	  break;
	case PRAGMA_OMP_CLAUSE_DEVICE:
	  clauses = c_parser_omp_clause_device (parser, clauses);
	  c_name = "device";
	  break;
	case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE:
	  clauses = c_parser_omp_clause_dist_schedule (parser, clauses);
	  c_name = "dist_schedule";
	  break;
	case PRAGMA_OMP_CLAUSE_PROC_BIND:
	  clauses = c_parser_omp_clause_proc_bind (parser, clauses);
	  c_name = "proc_bind";
	  break;
	case PRAGMA_OMP_CLAUSE_DEVICE_TYPE:
	  clauses = c_parser_omp_clause_device_type (parser, clauses);
	  c_name = "device_type";
	  break;
	case PRAGMA_OMP_CLAUSE_SAFELEN:
	  clauses = c_parser_omp_clause_safelen (parser, clauses);
	  c_name = "safelen";
	  break;
	case PRAGMA_OMP_CLAUSE_SIMDLEN:
	  clauses = c_parser_omp_clause_simdlen (parser, clauses);
	  c_name = "simdlen";
	  break;
	case PRAGMA_OMP_CLAUSE_NOGROUP:
	  clauses = c_parser_omp_clause_nogroup (parser, clauses);
	  c_name = "nogroup";
	  break;
	case PRAGMA_OMP_CLAUSE_THREADS:
	  clauses
	    = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_THREADS,
					       clauses);
	  c_name = "threads";
	  break;
	case PRAGMA_OMP_CLAUSE_SIMD:
	  clauses
	    = c_parser_omp_clause_orderedkind (parser, OMP_CLAUSE_SIMD,
					       clauses);
	  c_name = "simd";
	  break;
	default:
	  c_parser_error (parser, "expected %<#pragma omp%> clause");
	  goto saw_error;
	}
      first = false;
      /* Clauses not allowed by this directive's MASK were still parsed
	 (to keep the token stream consistent) and are discarded here.  */
      if (((mask >> c_kind) & 1) == 0)
	{
	  /* Remove the invalid clause(s) from the list to avoid
	     confusing the rest of the compiler.  */
	  clauses = prev;
	  error_at (here, "%qs is not valid for %qs", c_name, where);
	}
    }
 saw_error:
  if (!nested)
    c_parser_skip_to_pragma_eol (parser);
  if (finish_p)
    {
      /* "uniform" only appears on declare simd, which needs its own
	 clause-finishing context.  */
      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM)) != 0)
	return c_finish_omp_clauses (clauses, C_ORT_OMP_DECLARE_SIMD);
      return c_finish_omp_clauses (clauses, C_ORT_OMP);
    }
  return clauses;
}
/* OpenACC 2.0, OpenMP 2.5:
structured-block:
statement
In practice, we're also interested in adding the statement to an
outer node. So it is convenient if we work around the fact that
c_parser_statement calls add_stmt. */
/* Parse one statement as an OpenMP/OpenACC structured block.  The caller
   attaches the result itself, so collect the statement into a private
   statement list rather than letting c_parser_statement's add_stmt place
   it in the current one.  */
static tree
c_parser_omp_structured_block (c_parser *parser, bool *if_p)
{
  tree body = push_stmt_list ();
  c_parser_statement (parser, if_p);
  return pop_stmt_list (body);
}
/* OpenACC 2.0:
# pragma acc cache (variable-list) new-line
LOC is the location of the #pragma token.
*/
/* Parse a "#pragma acc cache (variable-list)" directive and emit the
   corresponding OACC_CACHE statement.  LOC is the location of the
   #pragma token.  */
static tree
c_parser_oacc_cache (location_t loc, c_parser *parser)
{
  tree clause_list
    = c_parser_omp_var_list_parens (parser, OMP_CLAUSE__CACHE_, NULL);
  clause_list = c_finish_omp_clauses (clause_list, C_ORT_ACC);
  c_parser_skip_to_pragma_eol (parser);
  tree cache_stmt = make_node (OACC_CACHE);
  TREE_TYPE (cache_stmt) = void_type_node;
  OACC_CACHE_CLAUSES (cache_stmt) = clause_list;
  SET_EXPR_LOCATION (cache_stmt, loc);
  add_stmt (cache_stmt);
  return cache_stmt;
}
/* OpenACC 2.0:
# pragma acc data oacc-data-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OACC_DATA_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ATTACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NO_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT))
/* Parse a "#pragma acc data" construct: the clauses first, then the
   structured block, handing both to c_finish_oacc_data.  LOC is the
   location of the #pragma token.  */
static tree
c_parser_oacc_data (location_t loc, c_parser *parser, bool *if_p)
{
  /* Clauses must be consumed before the body is parsed.  */
  tree data_clauses
    = c_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK,
				 "#pragma acc data");
  tree block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  return c_finish_oacc_data (loc, data_clauses, block);
}
/* OpenACC 2.0:
# pragma acc declare oacc-data-clause[optseq] new-line
*/
/* Clauses accepted on "#pragma acc declare".  */
#define OACC_DECLARE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE_RESIDENT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_LINK)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT))
/* Parse and validate a "#pragma acc declare" directive.  Every clause
   decl is checked against the scope rules for its map kind; valid decls
   are tagged with an "omp declare target" (or "... link") attribute and,
   at file scope, registered for offloading.  At block scope an
   OACC_DECLARE statement is additionally emitted.  Diagnoses but keeps
   scanning on a per-clause error so all problems are reported.  */
static void
c_parser_oacc_declare (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt, t, decl;
  bool error = false;
  c_parser_consume_pragma (parser);
  clauses = c_parser_oacc_all_clauses (parser, OACC_DECLARE_CLAUSE_MASK,
				       "#pragma acc declare");
  if (!clauses)
    {
      error_at (pragma_loc,
		"no valid clauses specified in %<#pragma acc declare%>");
      return;
    }
  /* Validate each clause decl in turn.  */
  for (t = clauses; t; t = OMP_CLAUSE_CHAIN (t))
    {
      location_t loc = OMP_CLAUSE_LOCATION (t);
      decl = OMP_CLAUSE_DECL (t);
      /* Array sections (non-DECL operands) are not allowed here.  */
      if (!DECL_P (decl))
	{
	  error_at (loc, "array section in %<#pragma acc declare%>");
	  error = true;
	  continue;
	}
      switch (OMP_CLAUSE_MAP_KIND (t))
	{
	case GOMP_MAP_FIRSTPRIVATE_POINTER:
	case GOMP_MAP_ALLOC:
	case GOMP_MAP_TO:
	case GOMP_MAP_FORCE_DEVICEPTR:
	case GOMP_MAP_DEVICE_RESIDENT:
	  /* These map kinds are valid in any scope.  */
	  break;
	case GOMP_MAP_LINK:
	  /* "link" requires a global (or extern) variable.  */
	  if (!global_bindings_p ()
	      && (TREE_STATIC (decl)
		  || !DECL_EXTERNAL (decl)))
	    {
	      error_at (loc,
			"%qD must be a global variable in "
			"%<#pragma acc declare link%>",
			decl);
	      error = true;
	      continue;
	    }
	  break;
	default:
	  /* Remaining map kinds are only valid at block scope, on
	     non-extern, non-public variables.  */
	  if (global_bindings_p ())
	    {
	      error_at (loc, "invalid OpenACC clause at file scope");
	      error = true;
	      continue;
	    }
	  if (DECL_EXTERNAL (decl))
	    {
	      error_at (loc,
			"invalid use of %<extern%> variable %qD "
			"in %<#pragma acc declare%>", decl);
	      error = true;
	      continue;
	    }
	  else if (TREE_PUBLIC (decl))
	    {
	      error_at (loc,
			"invalid use of %<global%> variable %qD "
			"in %<#pragma acc declare%>", decl);
	      error = true;
	      continue;
	    }
	  break;
	}
      if (!c_check_in_current_scope (decl))
	{
	  error_at (loc,
		    "%qD must be a variable declared in the same scope as "
		    "%<#pragma acc declare%>", decl);
	  error = true;
	  continue;
	}
      /* A decl may appear in at most one "acc declare"; a previously added
	 attribute marks a duplicate.  */
      if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (decl))
	  || lookup_attribute ("omp declare target link",
			       DECL_ATTRIBUTES (decl)))
	{
	  error_at (loc, "variable %qD used more than once with "
		    "%<#pragma acc declare%>", decl);
	  error = true;
	  continue;
	}
      if (!error)
	{
	  tree id;
	  if (OMP_CLAUSE_MAP_KIND (t) == GOMP_MAP_LINK)
	    id = get_identifier ("omp declare target link");
	  else
	    id = get_identifier ("omp declare target");
	  DECL_ATTRIBUTES (decl)
	    = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (decl));
	  /* File-scope decls are registered with the symbol table for
	     offloading right away.  */
	  if (global_bindings_p ())
	    {
	      symtab_node *node = symtab_node::get (decl);
	      if (node != NULL)
		{
		  node->offloadable = 1;
		  if (ENABLE_OFFLOADING)
		    {
		      g->have_offload = true;
		      if (is_a <varpool_node *> (node))
			vec_safe_push (offload_vars, decl);
		    }
		}
	    }
	}
    }
  /* Only block-scope "acc declare" emits a statement; file-scope handling
     is complete once the attributes are attached above.  */
  if (error || global_bindings_p ())
    return;
  stmt = make_node (OACC_DECLARE);
  TREE_TYPE (stmt) = void_type_node;
  OACC_DECLARE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_loc);
  add_stmt (stmt);
  return;
}
/* OpenACC 2.0:
# pragma acc enter data oacc-enter-data-clause[optseq] new-line
or
# pragma acc exit data oacc-exit-data-clause[optseq] new-line
LOC is the location of the #pragma token.
*/
/* Clauses accepted on "#pragma acc enter data".  */
#define OACC_ENTER_DATA_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ATTACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Clauses accepted on "#pragma acc exit data".  */
#define OACC_EXIT_DATA_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DELETE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DETACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_FINALIZE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Parse "#pragma acc enter data" (ENTER true) or "#pragma acc exit data"
   (ENTER false).  The caller has already matched "enter"/"exit"; this
   routine requires the following "data" keyword, parses the clauses, and
   emits an OACC_ENTER_DATA/OACC_EXIT_DATA statement.  */
static void
c_parser_oacc_enter_exit_data (c_parser *parser, bool enter)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses, stmt;
  const char *p = "";
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      c_parser_consume_token (parser);
    }
  /* The directive keyword is "enter data"/"exit data" -- "data" must
     follow immediately.  */
  if (strcmp (p, "data") != 0)
    {
      error_at (loc, "expected %<data%> after %<#pragma acc %s%>",
		enter ? "enter" : "exit");
      parser->error = true;
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  if (enter)
    clauses = c_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK,
					 "#pragma acc enter data");
  else
    clauses = c_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK,
					 "#pragma acc exit data");
  /* All data-movement clauses lower to OMP_CLAUSE_MAP; at least one is
     required for the directive to be meaningful.  */
  if (omp_find_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (loc, "%<#pragma acc %s data%> has no data movement clause",
		enter ? "enter" : "exit");
      return;
    }
  stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_STANDALONE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
}
/* OpenACC 2.0:
# pragma acc host_data oacc-data-clause[optseq] new-line
structured-block
*/
#define OACC_HOST_DATA_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_USE_DEVICE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF_PRESENT) )
/* Parse a "#pragma acc host_data" construct: the clauses first, then the
   structured block, handing both to c_finish_oacc_host_data.  LOC is the
   location of the #pragma token.  */
static tree
c_parser_oacc_host_data (location_t loc, c_parser *parser, bool *if_p)
{
  /* Clauses must be consumed before the body is parsed.  */
  tree host_data_clauses
    = c_parser_oacc_all_clauses (parser, OACC_HOST_DATA_CLAUSE_MASK,
				 "#pragma acc host_data");
  tree block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  return c_finish_oacc_host_data (loc, host_data_clauses, block);
}
/* OpenACC 2.0:
# pragma acc loop oacc-loop-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
/* Clauses accepted on "#pragma acc loop".  */
#define OACC_LOOP_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_AUTO)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_INDEPENDENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_TILE) )
/* Parse a "#pragma acc loop" construct, either standalone or as part of a
   combined compute construct.  P_NAME holds the construct name so far for
   diagnostics; MASK is the clause set accepted so far.  When CCLAUSES is
   non-NULL this is a combined construct and the parsed clauses are split
   between the loop and the enclosing compute construct.  */
static tree
c_parser_oacc_loop (location_t loc, c_parser *parser, char *p_name,
		    omp_clause_mask mask, tree *cclauses, bool *if_p)
{
  /* Of the compute-construct masks only "parallel" and "serial" include
     REDUCTION, so its bit distinguishes them from "kernels" for the
     clause split below.  */
  bool is_parallel = ((mask >> PRAGMA_OACC_CLAUSE_REDUCTION) & 1) == 1;
  strcat (p_name, " loop");
  mask |= OACC_LOOP_CLAUSE_MASK;
  /* For a combined construct (cclauses != NULL) the clauses are finished
     only after splitting, so tell the clause parser not to finish them.  */
  tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name,
					    cclauses == NULL);
  if (cclauses)
    {
      clauses = c_oacc_split_loop_clauses (clauses, cclauses, is_parallel);
      if (*cclauses)
	*cclauses = c_finish_omp_clauses (*cclauses, C_ORT_ACC);
      if (clauses)
	clauses = c_finish_omp_clauses (clauses, C_ORT_ACC);
    }
  tree block = c_begin_compound_stmt (true);
  tree stmt = c_parser_omp_for_loop (loc, parser, OACC_LOOP, clauses, NULL,
				     if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);
  return stmt;
}
/* OpenACC 2.0:
# pragma acc kernels oacc-kernels-clause[optseq] new-line
structured-block
or
# pragma acc parallel oacc-parallel-clause[optseq] new-line
structured-block
OpenACC 2.6:
# pragma acc serial oacc-serial-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
/* Clauses accepted on "#pragma acc kernels".  */
#define OACC_KERNELS_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ATTACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NO_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Clauses accepted on "#pragma acc parallel": the kernels set plus
   PRIVATE, FIRSTPRIVATE and REDUCTION.  */
#define OACC_PARALLEL_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ATTACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NO_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Clauses accepted on "#pragma acc serial": like parallel, but without
   the num_gangs/num_workers/vector_length sizing clauses.  */
#define OACC_SERIAL_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ATTACH)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEFAULT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NO_CREATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRIVATE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Parse one of the OpenACC compute constructs -- "kernels", "parallel" or
   "serial", selected by P_KIND -- optionally combined with an immediately
   following "loop".  P_NAME accumulates the construct name for
   diagnostics.  */
static tree
c_parser_oacc_compute (location_t loc, c_parser *parser,
		       enum pragma_kind p_kind, char *p_name, bool *if_p)
{
  omp_clause_mask mask;
  enum tree_code code;
  switch (p_kind)
    {
    case PRAGMA_OACC_KERNELS:
      strcat (p_name, " kernels");
      mask = OACC_KERNELS_CLAUSE_MASK;
      code = OACC_KERNELS;
      break;
    case PRAGMA_OACC_PARALLEL:
      strcat (p_name, " parallel");
      mask = OACC_PARALLEL_CLAUSE_MASK;
      code = OACC_PARALLEL;
      break;
    case PRAGMA_OACC_SERIAL:
      strcat (p_name, " serial");
      mask = OACC_SERIAL_CLAUSE_MASK;
      code = OACC_SERIAL;
      break;
    default:
      gcc_unreachable ();
    }
  /* Combined construct: "#pragma acc <compute> loop ...".  The loop
     parser splits the clauses and returns the compute construct's share
     in CLAUSES.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "loop") == 0)
	{
	  c_parser_consume_token (parser);
	  tree block = c_begin_omp_parallel ();
	  tree clauses;
	  c_parser_oacc_loop (loc, parser, p_name, mask, &clauses, if_p);
	  return c_finish_omp_construct (loc, code, block, clauses);
	}
    }
  /* Plain compute construct with a structured block body.  */
  tree clauses = c_parser_oacc_all_clauses (parser, mask, p_name);
  tree block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  return c_finish_omp_construct (loc, code, block, clauses);
}
/* OpenACC 2.0:
# pragma acc routine oacc-routine-clause[optseq] new-line
function-definition
# pragma acc routine ( name ) oacc-routine-clause[optseq] new-line
*/
/* Clauses accepted on "#pragma acc routine".  */
#define OACC_ROUTINE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_GANG)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WORKER)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SEQ) )
/* Parse an OpenACC routine directive.  For named directives, we apply
   immediately to the named function.  For unnamed ones we then parse
   a declaration or definition, which must be for a function.  */
static void
c_parser_oacc_routine (c_parser *parser, enum pragma_context context)
{
  /* "acc routine" is only accepted at file scope.  */
  gcc_checking_assert (context == pragma_external);
  oacc_routine_data data;
  data.error_seen = false;
  data.fndecl_seen = false;
  data.loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  /* Look for optional '( name )'.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser); /* '(' */
      tree decl = NULL_TREE;
      c_token *name_token = c_parser_peek_token (parser);
      location_t name_loc = name_token->location;
      if (name_token->type == CPP_NAME
	  && (name_token->id_kind == C_ID_ID
	      || name_token->id_kind == C_ID_TYPENAME))
	{
	  decl = lookup_name (name_token->value);
	  if (!decl)
	    error_at (name_loc,
		      "%qE has not been declared", name_token->value);
	  c_parser_consume_token (parser);
	}
      else
	c_parser_error (parser, "expected function name");
      if (!decl
	  || !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return;
	}
      data.clauses
	= c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
				     "#pragma acc routine");
      /* The clauses are in reverse order; fix that to make later diagnostic
	 emission easier.  */
      data.clauses = nreverse (data.clauses);
      if (TREE_CODE (decl) != FUNCTION_DECL)
	{
	  error_at (name_loc, "%qD does not refer to a function", decl);
	  return;
	}
      /* Named form: apply to DECL right away.  */
      c_finish_oacc_routine (&data, decl, false);
    }
  else /* No optional '( name )'.  */
    {
      data.clauses
	= c_parser_oacc_all_clauses (parser, OACC_ROUTINE_CLAUSE_MASK,
				     "#pragma acc routine");
      /* The clauses are in reverse order; fix that to make later diagnostic
	 emission easier.  */
      data.clauses = nreverse (data.clauses);
      /* Emit a helpful diagnostic if there's another pragma following this
	 one.  Also don't allow a static assertion declaration, as in the
	 following we'll just parse a *single* "declaration or function
	 definition", and the static assertion counts as one.  */
      if (c_parser_next_token_is (parser, CPP_PRAGMA)
	  || c_parser_next_token_is_keyword (parser, RID_STATIC_ASSERT))
	{
	  error_at (data.loc,
		    "%<#pragma acc routine%> not immediately followed by"
		    " function declaration or definition");
	  /* ..., and then just keep going.  */
	  return;
	}
      /* We only have to consider the pragma_external case here.  */
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  /* Skip any leading __extension__ keywords with extension
	     diagnostics disabled, then parse the decl/defn; DATA is
	     forwarded so c_finish_oacc_routine can be applied to it.  */
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  c_parser_declaration_or_fndef (parser, true, true, true, false, true,
					 NULL, vNULL, false, NULL, &data);
	  restore_extension_diagnostics (ext);
	}
      else
	c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				       NULL, vNULL, false, NULL, &data);
    }
}
/* Finalize an OpenACC routine pragma, applying it to FNDECL.
IS_DEFN is true if we're applying it to the definition. */
/* Finalize a "#pragma acc routine" described by DATA, applying it to
   FNDECL.  IS_DEFN is true if FNDECL is the function's definition.
   Verifies the clauses against any earlier "acc routine" for the same
   function, diagnoses misuse, and on success attaches the parallelism
   dims and the "omp declare target" attribute.  Errors are recorded in
   DATA->error_seen so the caller can keep parsing.  */
static void
c_finish_oacc_routine (struct oacc_routine_data *data, tree fndecl,
		       bool is_defn)
{
  /* Keep going if we're in error reporting mode.  */
  if (data->error_seen
      || fndecl == error_mark_node)
    return;
  /* One "acc routine" applies to exactly one function.  */
  if (data->fndecl_seen)
    {
      error_at (data->loc,
		"%<#pragma acc routine%> not immediately followed by"
		" a single function declaration or definition");
      data->error_seen = true;
      return;
    }
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    {
      error_at (data->loc,
		"%<#pragma acc routine%> not immediately followed by"
		" function declaration or definition");
      data->error_seen = true;
      return;
    }
  int compatible
    = oacc_verify_routine_clauses (fndecl, &data->clauses, data->loc,
				   "#pragma acc routine");
  if (compatible < 0)
    {
      data->error_seen = true;
      return;
    }
  if (compatible > 0)
    {
      /* FNDECL already carries compatible "acc routine" clauses from an
	 earlier directive; nothing more to attach.  */
    }
  else
    {
      /* The directive must precede any use or definition of FNDECL.  */
      if (TREE_USED (fndecl) || (!is_defn && DECL_SAVED_TREE (fndecl)))
	{
	  error_at (data->loc,
		    TREE_USED (fndecl)
		    ? G_("%<#pragma acc routine%> must be applied before use")
		    : G_("%<#pragma acc routine%> must be applied before"
			 " definition"));
	  data->error_seen = true;
	  return;
	}
      /* Set the routine's level of parallelism.  */
      tree dims = oacc_build_routine_dims (data->clauses);
      oacc_replace_fn_attrib (fndecl, dims);
      /* Add an "omp declare target" attribute.  */
      DECL_ATTRIBUTES (fndecl)
	= tree_cons (get_identifier ("omp declare target"),
		     data->clauses, DECL_ATTRIBUTES (fndecl));
    }
  /* Remember that we've used this "#pragma acc routine".  */
  data->fndecl_seen = true;
}
/* OpenACC 2.0:
# pragma acc update oacc-update-clause[optseq] new-line
*/
#define OACC_UPDATE_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF)			\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF_PRESENT)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) )
/* Parse a standalone "#pragma acc update" directive and emit an
   OACC_UPDATE statement.  At least one data-movement clause (which all
   lower to OMP_CLAUSE_MAP) is required.  */
static void
c_parser_oacc_update (c_parser *parser)
{
  location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  tree clause_list
    = c_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK,
				 "#pragma acc update");
  if (omp_find_clause (clause_list, OMP_CLAUSE_MAP) == NULL_TREE)
    {
      error_at (pragma_loc,
		"%<#pragma acc update%> must contain at least one "
		"%<device%> or %<host%> or %<self%> clause");
      return;
    }
  if (parser->error)
    return;
  tree update_stmt = make_node (OACC_UPDATE);
  TREE_TYPE (update_stmt) = void_type_node;
  OACC_UPDATE_CLAUSES (update_stmt) = clause_list;
  SET_EXPR_LOCATION (update_stmt, pragma_loc);
  add_stmt (update_stmt);
}
/* OpenACC 2.0:
# pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line
LOC is the location of the #pragma token.
*/
/* Clauses accepted on "#pragma acc wait".  */
#define OACC_WAIT_CLAUSE_MASK						\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) )
/* Parse a "#pragma acc wait [(intseq)]" directive.  LOC is the location
   of the #pragma token; P_NAME holds the construct-name prefix used for
   clause diagnostics (normally "#pragma acc" -- assumed to have room for
   the suffix, as for the other OpenACC parsers; verify against the
   caller's buffer size).  */
static tree
c_parser_oacc_wait (location_t loc, c_parser *parser, char *p_name)
{
  tree clauses, list = NULL_TREE, stmt = NULL_TREE;
  /* Optional parenthesized list of async-arguments to wait on.  */
  if (c_parser_peek_token (parser)->type == CPP_OPEN_PAREN)
    list = c_parser_oacc_wait_list (parser, loc, list);
  /* Append to the caller-supplied prefix rather than overwriting it
     (strcat, not strcpy), so clause diagnostics name the full construct
     -- consistent with c_parser_oacc_loop / c_parser_oacc_compute.  */
  strcat (p_name, " wait");
  clauses = c_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK, p_name);
  stmt = c_finish_oacc_wait (loc, list, clauses);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 2.5:
# pragma omp atomic new-line
expression-stmt
expression-stmt:
x binop= expr | x++ | ++x | x-- | --x
binop:
+, *, -, /, &, ^, |, <<, >>
where x is an lvalue expression with scalar type.
OpenMP 3.1:
# pragma omp atomic new-line
update-stmt
# pragma omp atomic read new-line
read-stmt
# pragma omp atomic write new-line
write-stmt
# pragma omp atomic update new-line
update-stmt
# pragma omp atomic capture new-line
capture-stmt
# pragma omp atomic capture new-line
capture-block
read-stmt:
v = x
write-stmt:
x = expr
update-stmt:
expression-stmt | x = x binop expr
capture-stmt:
v = expression-stmt
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; }
OpenMP 4.0:
update-stmt:
expression-stmt | x = x binop expr | x = expr binop x
capture-stmt:
v = update-stmt
capture-block:
{ v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; }
where x and v are lvalue expressions with scalar type.
LOC is the location of the #pragma token. */
/* Parse a "#pragma omp atomic" construct (see the grammar in the comment
   above).  LOC is the location of the #pragma token.  Parses the atomic
   kind and memory-order clauses, then the statement (or capture block),
   decomposes it into lhs/opcode/rhs (plus v/lhs1/rhs1 for captures), and
   hands everything to c_finish_omp_atomic.  */
static void
c_parser_omp_atomic (location_t loc, c_parser *parser)
{
  tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE;
  tree lhs1 = NULL_TREE, rhs1 = NULL_TREE;
  tree stmt, orig_lhs, unfolded_lhs = NULL_TREE, unfolded_lhs1 = NULL_TREE;
  enum tree_code code = ERROR_MARK, opcode = NOP_EXPR;
  enum omp_memory_order memory_order = OMP_MEMORY_ORDER_UNSPECIFIED;
  struct c_expr expr;
  location_t eloc;
  bool structured_block = false;
  bool swapped = false;
  bool non_lvalue_p;
  bool first = true;
  tree clauses = NULL_TREE;
  /* Parse the clauses: at most one atomic kind (read/write/update/
     capture), at most one memory-order clause, plus optional "hint".  */
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      first = false;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  const char *p
	    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	  location_t cloc = c_parser_peek_token (parser)->location;
	  enum tree_code new_code = ERROR_MARK;
	  enum omp_memory_order new_memory_order
	    = OMP_MEMORY_ORDER_UNSPECIFIED;
	  if (!strcmp (p, "read"))
	    new_code = OMP_ATOMIC_READ;
	  else if (!strcmp (p, "write"))
	    /* "write" is temporarily encoded as NOP_EXPR; it is turned
	       into OMP_ATOMIC with NOP_EXPR opcode further below.  */
	    new_code = NOP_EXPR;
	  else if (!strcmp (p, "update"))
	    new_code = OMP_ATOMIC;
	  else if (!strcmp (p, "capture"))
	    new_code = OMP_ATOMIC_CAPTURE_NEW;
	  else if (!strcmp (p, "seq_cst"))
	    new_memory_order = OMP_MEMORY_ORDER_SEQ_CST;
	  else if (!strcmp (p, "acq_rel"))
	    new_memory_order = OMP_MEMORY_ORDER_ACQ_REL;
	  else if (!strcmp (p, "release"))
	    new_memory_order = OMP_MEMORY_ORDER_RELEASE;
	  else if (!strcmp (p, "acquire"))
	    new_memory_order = OMP_MEMORY_ORDER_ACQUIRE;
	  else if (!strcmp (p, "relaxed"))
	    new_memory_order = OMP_MEMORY_ORDER_RELAXED;
	  else if (!strcmp (p, "hint"))
	    {
	      c_parser_consume_token (parser);
	      clauses = c_parser_omp_clause_hint (parser, clauses);
	      continue;
	    }
	  else
	    {
	      p = NULL;
	      error_at (cloc, "expected %<read%>, %<write%>, %<update%>, "
			      "%<capture%>, %<seq_cst%>, %<acq_rel%>, "
			      "%<release%>, %<relaxed%> or %<hint%> clause");
	    }
	  if (p)
	    {
	      if (new_code != ERROR_MARK)
		{
		  /* Only one atomic-kind clause is allowed.  */
		  if (code != ERROR_MARK)
		    error_at (cloc, "too many atomic clauses");
		  else
		    code = new_code;
		}
	      else if (new_memory_order != OMP_MEMORY_ORDER_UNSPECIFIED)
		{
		  /* Only one memory-order clause is allowed.  */
		  if (memory_order != OMP_MEMORY_ORDER_UNSPECIFIED)
		    error_at (cloc, "too many memory order clauses");
		  else
		    memory_order = new_memory_order;
		}
	      c_parser_consume_token (parser);
	      continue;
	    }
	}
      break;
    }
  c_parser_skip_to_pragma_eol (parser);
  /* No kind clause means "update".  */
  if (code == ERROR_MARK)
    code = OMP_ATOMIC;
  if (memory_order == OMP_MEMORY_ORDER_UNSPECIFIED)
    {
      /* No explicit memory order: fall back to the "requires
	 atomic_default_mem_order" setting, recording that it was used.  */
      omp_requires_mask
	= (enum omp_requires) (omp_requires_mask
			       | OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER_USED);
      switch ((enum omp_memory_order)
	      (omp_requires_mask & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER))
	{
	case OMP_MEMORY_ORDER_UNSPECIFIED:
	case OMP_MEMORY_ORDER_RELAXED:
	  memory_order = OMP_MEMORY_ORDER_RELAXED;
	  break;
	case OMP_MEMORY_ORDER_SEQ_CST:
	  memory_order = OMP_MEMORY_ORDER_SEQ_CST;
	  break;
	case OMP_MEMORY_ORDER_ACQ_REL:
	  /* acq_rel default degrades to the strongest order each atomic
	     kind supports.  */
	  switch (code)
	    {
	    case OMP_ATOMIC_READ:
	      memory_order = OMP_MEMORY_ORDER_ACQUIRE;
	      break;
	    case NOP_EXPR: /* atomic write */
	    case OMP_ATOMIC:
	      memory_order = OMP_MEMORY_ORDER_RELEASE;
	      break;
	    default:
	      memory_order = OMP_MEMORY_ORDER_ACQ_REL;
	      break;
	    }
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    /* An explicit memory order must be compatible with the atomic kind;
       incompatible combinations are diagnosed and forced to seq_cst.  */
    switch (code)
      {
      case OMP_ATOMIC_READ:
	if (memory_order == OMP_MEMORY_ORDER_ACQ_REL
	    || memory_order == OMP_MEMORY_ORDER_RELEASE)
	  {
	    error_at (loc, "%<#pragma omp atomic read%> incompatible with "
			   "%<acq_rel%> or %<release%> clauses");
	    memory_order = OMP_MEMORY_ORDER_SEQ_CST;
	  }
	break;
      case NOP_EXPR: /* atomic write */
	if (memory_order == OMP_MEMORY_ORDER_ACQ_REL
	    || memory_order == OMP_MEMORY_ORDER_ACQUIRE)
	  {
	    error_at (loc, "%<#pragma omp atomic write%> incompatible with "
			   "%<acq_rel%> or %<acquire%> clauses");
	    memory_order = OMP_MEMORY_ORDER_SEQ_CST;
	  }
	break;
      case OMP_ATOMIC:
	if (memory_order == OMP_MEMORY_ORDER_ACQ_REL
	    || memory_order == OMP_MEMORY_ORDER_ACQUIRE)
	  {
	    error_at (loc, "%<#pragma omp atomic update%> incompatible with "
			   "%<acq_rel%> or %<acquire%> clauses");
	    memory_order = OMP_MEMORY_ORDER_SEQ_CST;
	  }
	break;
      default:
	break;
      }
  switch (code)
    {
    case OMP_ATOMIC_READ:
    case NOP_EXPR: /* atomic write */
      /* Both forms start with "v =" (for write, V is really the store
	 target).  */
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL, true);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      loc = c_parser_peek_token (parser)->location;
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      if (code == NOP_EXPR)
	{
	  /* Write: the RHS is a full expression.  */
	  lhs = c_parser_expression (parser).value;
	  lhs = c_fully_fold (lhs, false, NULL);
	  if (lhs == error_mark_node)
	    goto saw_error;
	}
      else
	{
	  /* Read: the RHS is the location X being read, an lvalue.  */
	  lhs = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (lhs);
	  lhs = c_fully_fold (lhs, false, NULL, true);
	  if (lhs == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    lhs = non_lvalue (lhs);
	}
      if (code == NOP_EXPR)
	{
	  /* atomic write is represented by OMP_ATOMIC with NOP_EXPR
	     opcode.  */
	  code = OMP_ATOMIC;
	  rhs = lhs;
	  lhs = v;
	  v = NULL_TREE;
	}
      goto done;
    case OMP_ATOMIC_CAPTURE_NEW:
      if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	{
	  /* Capture-block form: "{ v = x; update; }" or similar.  */
	  c_parser_consume_token (parser);
	  structured_block = true;
	}
      else
	{
	  /* Capture-statement form: parse the leading "v =".  */
	  v = c_parser_cast_expression (parser, NULL).value;
	  non_lvalue_p = !lvalue_p (v);
	  v = c_fully_fold (v, false, NULL, true);
	  if (v == error_mark_node)
	    goto saw_error;
	  if (non_lvalue_p)
	    v = non_lvalue (v);
	  if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	    goto saw_error;
	}
      break;
    default:
      break;
    }
  /* For structured_block case we don't know yet whether
     old or new x should be captured.  */
restart:
  eloc = c_parser_peek_token (parser)->location;
  expr = c_parser_cast_expression (parser, NULL);
  lhs = expr.value;
  expr = default_function_array_conversion (eloc, expr);
  unfolded_lhs = expr.value;
  lhs = c_fully_fold (lhs, false, NULL, true);
  orig_lhs = lhs;
  /* Classify the update statement by the shape of its parsed tree.  */
  switch (TREE_CODE (lhs))
    {
    case ERROR_MARK:
    saw_error:
      /* Error recovery: skip the rest of the statement, and for the
	 block forms any remaining statements and the closing brace.  */
      c_parser_skip_to_end_of_block_or_statement (parser);
      if (structured_block)
	{
	  if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	    c_parser_consume_token (parser);
	  else if (code == OMP_ATOMIC_CAPTURE_NEW)
	    {
	      c_parser_skip_to_end_of_block_or_statement (parser);
	      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
		c_parser_consume_token (parser);
	    }
	}
      return;
    case POSTINCREMENT_EXPR:
      /* "v = x++" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREINCREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = PLUS_EXPR;
      rhs = integer_one_node;
      break;
    case POSTDECREMENT_EXPR:
      /* "v = x--" captures the old value of x.  */
      if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block)
	code = OMP_ATOMIC_CAPTURE_OLD;
      /* FALLTHROUGH */
    case PREDECREMENT_EXPR:
      lhs = TREE_OPERAND (lhs, 0);
      unfolded_lhs = NULL_TREE;
      opcode = MINUS_EXPR;
      rhs = integer_one_node;
      break;
    case COMPOUND_EXPR:
      if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR
	  && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR
	  && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR
	  && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0)
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND
					 (TREE_OPERAND (lhs, 1), 0), 0)))
	     == BOOLEAN_TYPE)
	/* Undo effects of boolean_increment for post {in,de}crement.  */
	lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0);
      /* FALLTHRU */
    case MODIFY_EXPR:
      if (TREE_CODE (lhs) == MODIFY_EXPR
	  && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE)
	{
	  /* Undo effects of boolean_increment.  */
	  if (integer_onep (TREE_OPERAND (lhs, 1)))
	    {
	      /* This is pre or post increment.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	  if (TREE_CODE (TREE_OPERAND (lhs, 1)) == TRUTH_NOT_EXPR
	      && TREE_OPERAND (lhs, 0)
		 == TREE_OPERAND (TREE_OPERAND (lhs, 1), 0))
	    {
	      /* This is pre or post decrement.  */
	      rhs = TREE_OPERAND (lhs, 1);
	      lhs = TREE_OPERAND (lhs, 0);
	      unfolded_lhs = NULL_TREE;
	      opcode = NOP_EXPR;
	      if (code == OMP_ATOMIC_CAPTURE_NEW
		  && !structured_block
		  && TREE_CODE (orig_lhs) == COMPOUND_EXPR)
		code = OMP_ATOMIC_CAPTURE_OLD;
	      break;
	    }
	}
      /* FALLTHRU */
    default:
      if (!lvalue_p (unfolded_lhs))
	lhs = non_lvalue (lhs);
      /* An explicit "x op= expr" or "x = ..." form: map the assignment
	 operator to the corresponding tree opcode.  */
      switch (c_parser_peek_token (parser)->type)
	{
	case CPP_MULT_EQ:
	  opcode = MULT_EXPR;
	  break;
	case CPP_DIV_EQ:
	  opcode = TRUNC_DIV_EXPR;
	  break;
	case CPP_PLUS_EQ:
	  opcode = PLUS_EXPR;
	  break;
	case CPP_MINUS_EQ:
	  opcode = MINUS_EXPR;
	  break;
	case CPP_LSHIFT_EQ:
	  opcode = LSHIFT_EXPR;
	  break;
	case CPP_RSHIFT_EQ:
	  opcode = RSHIFT_EXPR;
	  break;
	case CPP_AND_EQ:
	  opcode = BIT_AND_EXPR;
	  break;
	case CPP_OR_EQ:
	  opcode = BIT_IOR_EXPR;
	  break;
	case CPP_XOR_EQ:
	  opcode = BIT_XOR_EXPR;
	  break;
	case CPP_EQ:
	  /* "x = x binop expr" or "x = expr binop x": find which operand
	     of the parsed RHS is x itself.  */
	  c_parser_consume_token (parser);
	  eloc = c_parser_peek_token (parser)->location;
	  expr = c_parser_expr_no_commas (parser, NULL, unfolded_lhs);
	  rhs1 = expr.value;
	  switch (TREE_CODE (rhs1))
	    {
	    case MULT_EXPR:
	    case TRUNC_DIV_EXPR:
	    case RDIV_EXPR:
	    case PLUS_EXPR:
	    case MINUS_EXPR:
	    case LSHIFT_EXPR:
	    case RSHIFT_EXPR:
	    case BIT_AND_EXPR:
	    case BIT_IOR_EXPR:
	    case BIT_XOR_EXPR:
	      if (c_tree_equal (TREE_OPERAND (rhs1, 0), unfolded_lhs))
		{
		  /* "x = x binop expr".  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL,
				      true);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL,
				       true);
		  goto stmt_done;
		}
	      if (c_tree_equal (TREE_OPERAND (rhs1, 1), unfolded_lhs))
		{
		  /* "x = expr binop x"; note the swap for
		     non-commutative operators.  */
		  opcode = TREE_CODE (rhs1);
		  rhs = c_fully_fold (TREE_OPERAND (rhs1, 0), false, NULL,
				      true);
		  rhs1 = c_fully_fold (TREE_OPERAND (rhs1, 1), false, NULL,
				       true);
		  swapped = !commutative_tree_code (opcode);
		  goto stmt_done;
		}
	      break;
	    case ERROR_MARK:
	      goto saw_error;
	    default:
	      break;
	    }
	  if (c_parser_peek_token (parser)->type == CPP_SEMICOLON)
	    {
	      if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
		{
		  /* "{ v = x; x = ...; }" -- the first statement was the
		     capture of the old value; restart on the update.  */
		  code = OMP_ATOMIC_CAPTURE_OLD;
		  v = lhs;
		  lhs = NULL_TREE;
		  expr = default_function_array_read_conversion (eloc, expr);
		  unfolded_lhs1 = expr.value;
		  lhs1 = c_fully_fold (unfolded_lhs1, false, NULL, true);
		  rhs1 = NULL_TREE;
		  c_parser_consume_token (parser);
		  goto restart;
		}
	      if (structured_block)
		{
		  /* Plain "x = expr" inside a capture block.  */
		  opcode = NOP_EXPR;
		  expr = default_function_array_read_conversion (eloc, expr);
		  rhs = c_fully_fold (expr.value, false, NULL, true);
		  rhs1 = NULL_TREE;
		  goto stmt_done;
		}
	    }
	  c_parser_error (parser, "invalid form of %<#pragma omp atomic%>");
	  goto saw_error;
	default:
	  c_parser_error (parser,
			  "invalid operator for %<#pragma omp atomic%>");
	  goto saw_error;
	}
      /* Arrange to pass the location of the assignment operator to
	 c_finish_omp_atomic.  */
      loc = c_parser_peek_token (parser)->location;
      c_parser_consume_token (parser);
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_expression (parser);
      expr = default_function_array_read_conversion (eloc, expr);
      rhs = expr.value;
      rhs = c_fully_fold (rhs, false, NULL, true);
      break;
    }
stmt_done:
  /* A capture block of form "{ update; v = x; }": parse the trailing
     capture statement.  */
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!c_parser_require (parser, CPP_SEMICOLON, "expected %<;%>"))
	goto saw_error;
      v = c_parser_cast_expression (parser, NULL).value;
      non_lvalue_p = !lvalue_p (v);
      v = c_fully_fold (v, false, NULL, true);
      if (v == error_mark_node)
	goto saw_error;
      if (non_lvalue_p)
	v = non_lvalue (v);
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	goto saw_error;
      eloc = c_parser_peek_token (parser)->location;
      expr = c_parser_cast_expression (parser, NULL);
      lhs1 = expr.value;
      expr = default_function_array_read_conversion (eloc, expr);
      unfolded_lhs1 = expr.value;
      lhs1 = c_fully_fold (lhs1, false, NULL, true);
      if (lhs1 == error_mark_node)
	goto saw_error;
      if (!lvalue_p (unfolded_lhs1))
	lhs1 = non_lvalue (lhs1);
    }
  if (structured_block)
    {
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
      c_parser_require (parser, CPP_CLOSE_BRACE, "expected %<}%>");
    }
done:
  /* Both statements of a capture block must refer to the same x.  */
  if (unfolded_lhs && unfolded_lhs1
      && !c_tree_equal (unfolded_lhs, unfolded_lhs1))
    {
      error ("%<#pragma omp atomic capture%> uses two different "
	     "expressions for memory");
      stmt = error_mark_node;
    }
  else
    stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1, rhs1,
				swapped, memory_order);
  if (stmt != error_mark_node)
    add_stmt (stmt);
  if (!structured_block)
    c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
}
/* OpenMP 2.5:
   # pragma omp barrier new-line

   Parse and act on a standalone barrier directive: record the pragma
   location, consume the pragma and its trailing new-line, and emit the
   barrier via c_finish_omp_barrier.  */

static void
c_parser_omp_barrier (c_parser *parser)
{
  /* Remember where the directive started for diagnostics/debug info.  */
  location_t barrier_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  /* No clauses are allowed; skip straight to the end of the line.  */
  c_parser_skip_to_pragma_eol (parser);
  c_finish_omp_barrier (barrier_loc);
}
/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block

   OpenMP 4.5:
   # pragma omp critical [(name) [hint(expression)]] new-line

   LOC is the location of the #pragma itself.  */

#define OMP_CRITICAL_CLAUSE_MASK		\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_HINT) )

static tree
c_parser_omp_critical (location_t loc, c_parser *parser, bool *if_p)
{
  tree crit_name = NULL_TREE;
  tree crit_clauses = NULL_TREE;

  /* Optional parenthesized critical-section name.  */
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  crit_name = c_parser_peek_token (parser)->value;
	  c_parser_consume_token (parser);
	  c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>");
	}
      else
	c_parser_error (parser, "expected identifier");

      /* Swallow a comma separating the name from a following clause
	 such as hint(...).  */
      if (c_parser_next_token_is (parser, CPP_COMMA)
	  && c_parser_peek_2nd_token (parser)->type == CPP_NAME)
	c_parser_consume_token (parser);
    }
  crit_clauses = c_parser_omp_all_clauses (parser, OMP_CRITICAL_CLAUSE_MASK,
					   "#pragma omp critical");
  tree body = c_parser_omp_structured_block (parser, if_p);
  return c_finish_omp_critical (loc, body, crit_name, crit_clauses);
}
/* OpenMP 5.0:
   # pragma omp depobj ( depobj ) depobj-clause new-line

   depobj-clause:
     depend (dependence-type : locator)
     destroy
     update (dependence-type)

   dependence-type:
     in
     out
     inout
     mutexinoutset  */

static void
c_parser_omp_depobj (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  matching_parens parens;
  if (!parens.require_open (parser))
    {
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  /* Parse the depobj operand; it must be an lvalue so its address can
     be taken below.  */
  tree depobj = c_parser_expr_no_commas (parser, NULL).value;
  if (depobj != error_mark_node)
    {
      if (!lvalue_p (depobj))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<depobj%> expression is not lvalue expression");
	  depobj = error_mark_node;
	}
      else
	{
	  /* Canonicalize to *&depobj so later processing sees a
	     dereference of the object's address.  */
	  tree addr = build_unary_op (EXPR_LOC_OR_LOC (depobj, loc), ADDR_EXPR,
				      depobj, false);
	  if (addr == error_mark_node)
	    depobj = error_mark_node;
	  else
	    depobj = build_indirect_ref (EXPR_LOC_OR_LOC (depobj, loc),
					 addr, RO_UNARY_STAR);
	}
    }
  parens.skip_until_found_close (parser);
  tree clause = NULL_TREE;
  /* OMP_CLAUSE_DEPEND_SOURCE doubles as a "no kind seen yet" sentinel;
     if it survives to the end, a diagnostic is emitted.  */
  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_SOURCE;
  location_t c_loc = c_parser_peek_token (parser)->location;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      c_parser_consume_token (parser);
      if (!strcmp ("depend", p))
	{
	  /* Full depend clause with dependence-type and locator.  */
	  clause = c_parser_omp_clause_depend (parser, NULL_TREE);
	  clause = c_finish_omp_clauses (clause, C_ORT_OMP);
	  if (!clause)
	    clause = error_mark_node;
	}
      else if (!strcmp ("destroy", p))
	/* OMP_CLAUSE_DEPEND_LAST encodes the destroy operation.  */
	kind = OMP_CLAUSE_DEPEND_LAST;
      else if (!strcmp ("update", p))
	{
	  matching_parens c_parens;
	  if (c_parens.require_open (parser))
	    {
	      location_t c2_loc = c_parser_peek_token (parser)->location;
	      if (c_parser_next_token_is (parser, CPP_NAME))
		{
		  const char *p2
		    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

		  c_parser_consume_token (parser);
		  if (!strcmp ("in", p2))
		    kind = OMP_CLAUSE_DEPEND_IN;
		  else if (!strcmp ("out", p2))
		    kind = OMP_CLAUSE_DEPEND_OUT;
		  else if (!strcmp ("inout", p2))
		    kind = OMP_CLAUSE_DEPEND_INOUT;
		  else if (!strcmp ("mutexinoutset", p2))
		    kind = OMP_CLAUSE_DEPEND_MUTEXINOUTSET;
		}
	      /* Still the sentinel: no valid dependence-type parsed.  */
	      if (kind == OMP_CLAUSE_DEPEND_SOURCE)
		{
		  clause = error_mark_node;
		  error_at (c2_loc, "expected %<in%>, %<out%>, %<inout%> or "
				    "%<mutexinoutset%>");
		}
	      c_parens.skip_until_found_close (parser);
	    }
	  else
	    clause = error_mark_node;
	}
    }
  /* Neither a depend clause nor destroy/update was recognized.  */
  if (!clause && kind == OMP_CLAUSE_DEPEND_SOURCE)
    {
      clause = error_mark_node;
      error_at (c_loc, "expected %<depend%>, %<destroy%> or %<update%> clause");
    }
  c_parser_skip_to_pragma_eol (parser);
  c_finish_omp_depobj (loc, depobj, kind, clause);
}
/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list )

   OpenMP 5.0:
   # pragma omp flush memory-order-clause new-line

   Parses an optional memory-order keyword and/or an optional variable
   list; the two are mutually exclusive.  */

static void
c_parser_omp_flush (c_parser *parser)
{
  location_t flush_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  /* MEMMODEL_LAST means "no memory-order clause given".  */
  enum memmodel memorder = MEMMODEL_LAST;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *kwd
	= IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (!strcmp (kwd, "acq_rel"))
	memorder = MEMMODEL_ACQ_REL;
      else if (!strcmp (kwd, "release"))
	memorder = MEMMODEL_RELEASE;
      else if (!strcmp (kwd, "acquire"))
	memorder = MEMMODEL_ACQUIRE;
      else
	error_at (c_parser_peek_token (parser)->location,
		  "expected %<acq_rel%>, %<release%> or %<acquire%>");
      /* The identifier is consumed even when it was not recognized.  */
      c_parser_consume_token (parser);
    }
  if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      if (memorder != MEMMODEL_LAST)
	error_at (c_parser_peek_token (parser)->location,
		  "%<flush%> list specified together with memory order "
		  "clause");
      c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
    }
  else if (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    c_parser_error (parser, "expected %<(%> or end of line");
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_flush (flush_loc, memorder);
}
/* OpenMP 5.0:

   scan-loop-body:
     { structured-block scan-directive structured-block }

   Parses the body of a worksharing loop with an inscan reduction: two
   structured blocks separated by a #pragma omp scan directive.  Both
   halves are wrapped in OMP_SCAN trees; the clauses from the scan
   directive (inclusive/exclusive) are attached to the second half.  */

static void
c_parser_omp_scan_loop_body (c_parser *parser, bool open_brace_parsed)
{
  tree substmt;
  location_t loc;
  tree clauses = NULL_TREE;

  loc = c_parser_peek_token (parser)->location;
  if (!open_brace_parsed
      && !c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return;
    }

  /* First half: the block before the scan directive; no clauses.  */
  substmt = c_parser_omp_structured_block (parser, NULL);
  substmt = build2 (OMP_SCAN, void_type_node, substmt, NULL_TREE);
  SET_EXPR_LOCATION (substmt, loc);
  add_stmt (substmt);

  loc = c_parser_peek_token (parser)->location;
  if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SCAN)
    {
      enum omp_clause_code clause = OMP_CLAUSE_ERROR;

      c_parser_consume_pragma (parser);

      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  const char *p
	    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	  if (strcmp (p, "inclusive") == 0)
	    clause = OMP_CLAUSE_INCLUSIVE;
	  else if (strcmp (p, "exclusive") == 0)
	    clause = OMP_CLAUSE_EXCLUSIVE;
	}
      if (clause != OMP_CLAUSE_ERROR)
	{
	  c_parser_consume_token (parser);
	  clauses = c_parser_omp_var_list_parens (parser, clause, NULL_TREE);
	}
      else
	c_parser_error (parser, "expected %<inclusive%> or "
				"%<exclusive%> clause");
      c_parser_skip_to_pragma_eol (parser);
    }
  else
    error ("expected %<#pragma omp scan%>");

  clauses = c_finish_omp_clauses (clauses, C_ORT_OMP);
  /* Second half: the block after the scan directive, carrying the
     inclusive/exclusive clauses.  */
  substmt = c_parser_omp_structured_block (parser, NULL);
  substmt = build2 (OMP_SCAN, void_type_node, substmt, clauses);
  SET_EXPR_LOCATION (substmt, loc);
  add_stmt (substmt);
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<}%>");
}
/* Parse the restricted form of loop statements allowed by OpenACC and OpenMP.
   The real trick here is to determine the loop control variable early
   so that we can push a new decl if necessary to make it private.
   LOC is the location of the "acc" or "omp" in "#pragma acc" or "#pragma omp",
   respectively.  */

static tree
c_parser_omp_for_loop (location_t loc, c_parser *parser, enum tree_code code,
		       tree clauses, tree *cclauses, bool *if_p)
{
  tree decl, cond, incr, body, init, stmt, cl;
  unsigned char save_in_statement;
  tree declv, condv, incrv, initv, ret = NULL_TREE;
  tree pre_body = NULL_TREE, this_pre_body;
  tree ordered_cl = NULL_TREE;
  bool fail = false, open_brace_parsed = false;
  int i, collapse = 1, ordered = 0, count, nbraces = 0;
  location_t for_loc;
  bool tiling = false;
  bool inscan = false;
  vec<tree, va_gc> *for_block = make_tree_vector ();

  /* Scan the clauses to learn how many nested loops this directive
     covers (collapse/tile/ordered) and whether an inscan reduction is
     present.  */
  for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl))
    if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE)
      collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl));
    else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_TILE)
      {
	tiling = true;
	collapse = list_length (OMP_CLAUSE_TILE_LIST (cl));
      }
    else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_ORDERED
	     && OMP_CLAUSE_ORDERED_EXPR (cl))
      {
	ordered_cl = cl;
	ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (cl));
      }
    else if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_REDUCTION
	     && OMP_CLAUSE_REDUCTION_INSCAN (cl)
	     && (code == OMP_SIMD || code == OMP_FOR))
      inscan = true;

  /* ordered(n) must cover at least as many loops as collapse(n);
     repair the clause and keep going.  */
  if (ordered && ordered < collapse)
    {
      error_at (OMP_CLAUSE_LOCATION (ordered_cl),
		"%<ordered%> clause parameter is less than %<collapse%>");
      OMP_CLAUSE_ORDERED_EXPR (ordered_cl)
	= build_int_cst (NULL_TREE, collapse);
      ordered = collapse;
    }
  if (ordered)
    {
      /* linear is incompatible with ordered(n); drop the offending
	 clauses from the list.  */
      for (tree *pc = &clauses; *pc; )
	if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_LINEAR)
	  {
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<linear%> clause may not be specified together "
		      "with %<ordered%> clause with a parameter");
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	  }
	else
	  pc = &OMP_CLAUSE_CHAIN (*pc);
    }

  gcc_assert (tiling || (collapse >= 1 && ordered >= 0));
  count = ordered ? ordered : collapse;

  /* Parallel vectors of per-loop decl/init/cond/incr trees handed to
     c_finish_omp_for.  */
  declv = make_tree_vec (count);
  initv = make_tree_vec (count);
  condv = make_tree_vec (count);
  incrv = make_tree_vec (count);

  if (!c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      c_parser_error (parser, "for statement expected");
      return NULL;
    }
  for_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_token (parser);

  /* Forbid break/continue in the loop initializer, condition, and
     increment expressions.  */
  save_in_statement = in_statement;
  in_statement = IN_OMP_BLOCK;

  /* Parse the COUNT nested loop headers.  */
  for (i = 0; i < count; i++)
    {
      int bracecount = 0;

      matching_parens parens;
      if (!parens.require_open (parser))
	goto pop_scopes;

      /* Parse the initialization declaration or expression.  */
      if (c_parser_next_tokens_start_declaration (parser))
	{
	  /* Inner loop decls get their own compound statement scope so
	     they can be made private.  */
	  if (i > 0)
	    vec_safe_push (for_block, c_begin_compound_stmt (true));
	  this_pre_body = push_stmt_list ();
	  c_in_omp_for = true;
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 NULL, vNULL);
	  c_in_omp_for = false;
	  if (this_pre_body)
	    {
	      this_pre_body = pop_stmt_list (this_pre_body);
	      if (pre_body)
		{
		  /* Chain this loop's pre-body after the previous ones.  */
		  tree t = pre_body;
		  pre_body = push_stmt_list ();
		  add_stmt (t);
		  add_stmt (this_pre_body);
		  pre_body = pop_stmt_list (pre_body);
		}
	      else
		pre_body = this_pre_body;
	    }
	  decl = check_for_loop_decls (for_loc, flag_isoc99);
	  if (decl == NULL)
	    goto error_init;
	  if (DECL_INITIAL (decl) == error_mark_node)
	    decl = error_mark_node;
	  init = decl;
	}
      else if (c_parser_next_token_is (parser, CPP_NAME)
	       && c_parser_peek_2nd_token (parser)->type == CPP_EQ)
	{
	  /* Initialization of an already-declared iteration variable:
	     "ivar = expr".  */
	  struct c_expr decl_exp;
	  struct c_expr init_exp;
	  location_t init_loc;

	  decl_exp = c_parser_postfix_expression (parser);
	  decl = decl_exp.value;

	  c_parser_require (parser, CPP_EQ, "expected %<=%>");

	  init_loc = c_parser_peek_token (parser)->location;
	  init_exp = c_parser_expr_no_commas (parser, NULL);
	  init_exp = default_function_array_read_conversion (init_loc,
							     init_exp);
	  c_in_omp_for = true;
	  init = build_modify_expr (init_loc, decl, decl_exp.original_type,
				    NOP_EXPR, init_loc, init_exp.value,
				    init_exp.original_type);
	  c_in_omp_for = false;
	  init = c_process_expr_stmt (init_loc, init);

	  c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");
	}
      else
	{
	error_init:
	  c_parser_error (parser,
			  "expected iteration declaration or initialization");
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN,
				     "expected %<)%>");
	  fail = true;
	  goto parse_next;
	}

      /* Parse the loop condition.  */
      cond = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_SEMICOLON))
	{
	  location_t cond_loc = c_parser_peek_token (parser)->location;
	  c_in_omp_for = true;
	  struct c_expr cond_expr
	    = c_parser_binary_expression (parser, NULL, NULL_TREE);
	  c_in_omp_for = false;

	  cond = cond_expr.value;
	  cond = c_objc_common_truthvalue_conversion (cond_loc, cond);
	  /* Only relational conditions (and != for OpenMP) are valid
	     canonical loop forms.  */
	  switch (cond_expr.original_code)
	    {
	    case GT_EXPR:
	    case GE_EXPR:
	    case LT_EXPR:
	    case LE_EXPR:
	      break;
	    case NE_EXPR:
	      if (code != OACC_LOOP)
		break;
	      /* FALLTHRU.  */
	    default:
	      /* Can't be cond = error_mark_node, because we want to preserve
		 the location until c_finish_omp_for.  */
	      cond = build1 (NOP_EXPR, boolean_type_node, error_mark_node);
	      break;
	    }
	  protected_set_expr_location (cond, cond_loc);
	}
      c_parser_skip_until_found (parser, CPP_SEMICOLON, "expected %<;%>");

      /* Parse the increment expression.  */
      incr = NULL_TREE;
      if (c_parser_next_token_is_not (parser, CPP_CLOSE_PAREN))
	{
	  location_t incr_loc = c_parser_peek_token (parser)->location;

	  incr = c_process_expr_stmt (incr_loc,
				      c_parser_expression (parser).value);
	}
      parens.skip_until_found_close (parser);

      if (decl == NULL || decl == error_mark_node || init == error_mark_node)
	fail = true;
      else
	{
	  TREE_VEC_ELT (declv, i) = decl;
	  TREE_VEC_ELT (initv, i) = init;
	  TREE_VEC_ELT (condv, i) = cond;
	  TREE_VEC_ELT (incrv, i) = incr;
	}

    parse_next:
      if (i == count - 1)
	break;

      /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed
	 in between the collapsed for loops to be still considered perfectly
	 nested.  Hopefully the final version clarifies this.
	 For now handle (multiple) {'s and empty statements.  */
      do
	{
	  if (c_parser_next_token_is_keyword (parser, RID_FOR))
	    {
	      c_parser_consume_token (parser);
	      break;
	    }
	  else if (c_parser_next_token_is (parser, CPP_OPEN_BRACE))
	    {
	      c_parser_consume_token (parser);
	      bracecount++;
	    }
	  else if (bracecount
		   && c_parser_next_token_is (parser, CPP_SEMICOLON))
	    c_parser_consume_token (parser);
	  else
	    {
	      c_parser_error (parser, "not enough perfectly nested loops");
	      if (bracecount)
		{
		  open_brace_parsed = true;
		  bracecount--;
		}
	      fail = true;
	      count = 0;
	      break;
	    }
	}
      while (1);

      nbraces += bracecount;
    }

  /* An if clause on a combined construct cannot apply once extra braces
     were consumed.  */
  if (nbraces)
    if_p = NULL;

  in_statement = IN_OMP_FOR;

  /* Parse the innermost loop body.  */
  body = push_stmt_list ();

  if (inscan)
    c_parser_omp_scan_loop_body (parser, open_brace_parsed);
  else if (open_brace_parsed)
    {
      location_t here = c_parser_peek_token (parser)->location;
      stmt = c_begin_compound_stmt (true);
      c_parser_compound_statement_nostart (parser);
      add_stmt (c_end_compound_stmt (here, stmt, true));
    }
  else
    add_stmt (c_parser_c99_block_statement (parser, if_p));

  body = pop_stmt_list (body);
  in_statement = save_in_statement;

  /* Consume the close braces opened between the nested loop headers.  */
  while (nbraces)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	{
	  c_parser_consume_token (parser);
	  nbraces--;
	}
      else if (c_parser_next_token_is (parser, CPP_SEMICOLON))
	c_parser_consume_token (parser);
      else
	{
	  c_parser_error (parser, "collapsed loops not perfectly nested");
	  while (nbraces)
	    {
	      location_t here = c_parser_peek_token (parser)->location;
	      stmt = c_begin_compound_stmt (true);
	      add_stmt (body);
	      c_parser_compound_statement_nostart (parser);
	      body = c_end_compound_stmt (here, stmt, true);
	      nbraces--;
	    }
	  goto pop_scopes;
	}
    }

  /* Only bother calling c_finish_omp_for if we haven't already generated
     an error from the initialization parsing.  */
  if (!fail)
    {
      c_in_omp_for = true;
      stmt = c_finish_omp_for (loc, code, declv, NULL, initv, condv,
			       incrv, body, pre_body, true);
      c_in_omp_for = false;

      /* Check for iterators appearing in lb, b or incr expressions.  */
      if (stmt && !c_omp_check_loop_iv (stmt, declv, NULL))
	stmt = NULL_TREE;

      if (stmt)
	{
	  add_stmt (stmt);

	  /* Fold the init and cond operands of each loop; non-rectangular
	     loops store TREE_VECs whose elements are folded instead.  */
	  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
	    {
	      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
	      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	      tree decl = TREE_OPERAND (init, 0);
	      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
	      gcc_assert (COMPARISON_CLASS_P (cond));
	      gcc_assert (TREE_OPERAND (cond, 0) == decl);

	      tree op0 = TREE_OPERAND (init, 1);
	      if (!OMP_FOR_NON_RECTANGULAR (stmt)
		  || TREE_CODE (op0) != TREE_VEC)
		TREE_OPERAND (init, 1) = c_fully_fold (op0, false, NULL);
	      else
		{
		  TREE_VEC_ELT (op0, 1)
		    = c_fully_fold (TREE_VEC_ELT (op0, 1), false, NULL);
		  TREE_VEC_ELT (op0, 2)
		    = c_fully_fold (TREE_VEC_ELT (op0, 2), false, NULL);
		}

	      tree op1 = TREE_OPERAND (cond, 1);
	      if (!OMP_FOR_NON_RECTANGULAR (stmt)
		  || TREE_CODE (op1) != TREE_VEC)
		TREE_OPERAND (cond, 1) = c_fully_fold (op1, false, NULL);
	      else
		{
		  TREE_VEC_ELT (op1, 1)
		    = c_fully_fold (TREE_VEC_ELT (op1, 1), false, NULL);
		  TREE_VEC_ELT (op1, 2)
		    = c_fully_fold (TREE_VEC_ELT (op1, 2), false, NULL);
		}
	    }

	  if (cclauses != NULL
	      && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL)
	    {
	      tree *c;
	      for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; )
		if (OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_FIRSTPRIVATE
		    && OMP_CLAUSE_CODE (*c) != OMP_CLAUSE_LASTPRIVATE)
		  c = &OMP_CLAUSE_CHAIN (*c);
		else
		  {
		    /* Only firstprivate/lastprivate of the iteration
		       variables themselves are treated specially.  */
		    for (i = 0; i < count; i++)
		      if (TREE_VEC_ELT (declv, i) == OMP_CLAUSE_DECL (*c))
			break;
		    if (i == count)
		      c = &OMP_CLAUSE_CHAIN (*c);
		    else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE)
		      {
			error_at (loc,
				  "iteration variable %qD should not be firstprivate",
				  OMP_CLAUSE_DECL (*c));
			*c = OMP_CLAUSE_CHAIN (*c);
		      }
		    else
		      {
			/* Move lastprivate (decl) clause to OMP_FOR_CLAUSES.  */
			tree l = *c;
			*c = OMP_CLAUSE_CHAIN (*c);
			if (code == OMP_SIMD)
			  {
			    OMP_CLAUSE_CHAIN (l)
			      = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
			    cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l;
			  }
			else
			  {
			    OMP_CLAUSE_CHAIN (l) = clauses;
			    clauses = l;
			  }
		      }
		  }
	    }
	  OMP_FOR_CLAUSES (stmt) = clauses;
	}
      ret = stmt;
    }

pop_scopes:
  while (!for_block->is_empty ())
    {
      /* FIXME diagnostics: LOC below should be the actual location of
	 this particular for block.  We need to build a list of
	 locations to go along with FOR_BLOCK.  */
      stmt = c_end_compound_stmt (loc, for_block->pop (), true);
      add_stmt (stmt);
    }
  release_tree_vector (for_block);
  return ret;
}
/* Helper function for OpenMP parsing, split clauses and call
   finish_omp_clauses on each of the set of clauses afterwards.  */

static void
omp_split_clauses (location_t loc, enum tree_code code,
		   omp_clause_mask mask, tree clauses, tree *cclauses)
{
  /* Distribute CLAUSES across the constituent constructs first, then
     validate each non-empty bucket.  */
  c_omp_split_clauses (loc, code, mask, clauses, cclauses);
  for (int idx = 0; idx < C_OMP_CLAUSE_SPLIT_COUNT; idx++)
    if (cclauses[idx])
      cclauses[idx] = c_finish_omp_clauses (cclauses[idx], C_ORT_OMP);
}
/* OpenMP 5.0:
   #pragma omp loop loop-clause[optseq] new-line
     for-loop

   LOC is the location of the #pragma token.  */

#define OMP_LOOP_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_BIND)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))

static tree
c_parser_omp_loop (location_t loc, c_parser *parser,
		   char *p_name, omp_clause_mask mask, tree *cclauses,
		   bool *if_p)
{
  strcat (p_name, " loop");
  mask |= OMP_LOOP_CLAUSE_MASK;

  /* When part of a combined construct, split the clauses and keep only
     the loop-construct bucket here.  */
  tree loop_clauses
    = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_LOOP, mask, loop_clauses, cclauses);
      loop_clauses = cclauses[C_OMP_CLAUSE_SPLIT_LOOP];
    }

  tree scope = c_begin_compound_stmt (true);
  tree result = c_parser_omp_for_loop (loc, parser, OMP_LOOP, loop_clauses,
				       cclauses, if_p);
  scope = c_end_compound_stmt (loc, scope, true);
  add_stmt (scope);

  return result;
}
/* OpenMP 4.0:
   #pragma omp simd simd-clause[optseq] new-line
     for-loop

   LOC is the location of the #pragma token.  */

#define OMP_SIMD_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NONTEMPORAL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))

static tree
c_parser_omp_simd (location_t loc, c_parser *parser,
		   char *p_name, omp_clause_mask mask, tree *cclauses,
		   bool *if_p)
{
  strcat (p_name, " simd");
  mask |= OMP_SIMD_CLAUSE_MASK;

  tree simd_clauses
    = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_SIMD, mask, simd_clauses, cclauses);
      simd_clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];

      /* An ordered clause with a parameter that was split onto the
	 enclosing for construct is invalid on a combined ... for simd;
	 diagnose it and strip the parameter.  */
      tree ordered_c = omp_find_clause (cclauses[C_OMP_CLAUSE_SPLIT_FOR],
					OMP_CLAUSE_ORDERED);
      if (ordered_c && OMP_CLAUSE_ORDERED_EXPR (ordered_c))
	{
	  error_at (OMP_CLAUSE_LOCATION (ordered_c),
		    "%<ordered%> clause with parameter may not be specified "
		    "on %qs construct", p_name);
	  OMP_CLAUSE_ORDERED_EXPR (ordered_c) = NULL_TREE;
	}
    }

  tree scope = c_begin_compound_stmt (true);
  tree result = c_parser_omp_for_loop (loc, parser, OMP_SIMD, simd_clauses,
				       cclauses, if_p);
  scope = c_end_compound_stmt (loc, scope, true);
  add_stmt (scope);

  return result;
}
/* OpenMP 2.5:
   #pragma omp for for-clause[optseq] new-line
     for-loop

   OpenMP 4.0:
   #pragma omp for simd for-simd-clause[optseq] new-line
     for-loop

   LOC is the location of the #pragma token.  */

#define OMP_FOR_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDER))

static tree
c_parser_omp_for (location_t loc, c_parser *parser,
		  char *p_name, omp_clause_mask mask, tree *cclauses,
		  bool *if_p)
{
  tree block, clauses, ret;

  strcat (p_name, " for");
  mask |= OMP_FOR_CLAUSE_MASK;
  /* parallel for{, simd} disallows nowait clause, but for
     target {teams distribute ,}parallel for{, simd} it should be accepted.  */
  if (cclauses && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);
  /* Composite distribute parallel for{, simd} disallows ordered clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED);

  /* Handle the combined "for simd" spelling.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      if (strcmp (p, "simd") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return ret;
	  /* Wrap the simd loop in an OMP_FOR node carrying the clauses
	     split for the for construct.  */
	  ret = make_node (OMP_FOR);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  /* Composite distribute parallel for disallows linear clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR);

  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
    }

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_FOR, clauses, cclauses, if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return ret;
}
static tree c_parser_omp_taskloop (location_t, c_parser *, char *,
omp_clause_mask, tree *, bool *);
/* OpenMP 2.5:
   # pragma omp master new-line
     structured-block

   LOC is the location of the #pragma token.  */

static tree
c_parser_omp_master (location_t loc, c_parser *parser,
		     char *p_name, omp_clause_mask mask, tree *cclauses,
		     bool *if_p)
{
  tree block, clauses, ret;

  strcat (p_name, " master");

  /* Handle the combined "master taskloop[ simd]" spelling.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      if (strcmp (p, "taskloop") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_taskloop (loc, parser, p_name, mask, cclauses,
					  if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_taskloop (loc, parser, p_name, mask, cclauses,
				       if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return ret;
	  /* Wrap the taskloop statement in the master construct.  */
	  ret = c_finish_omp_master (loc, block);
	  return ret;
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  if (cclauses)
    {
      /* Combined construct: parse and split the clauses; master itself
	 takes none of them.  */
      clauses = c_parser_omp_all_clauses (parser, mask, p_name, false);
      omp_split_clauses (loc, OMP_MASTER, mask, clauses, cclauses);
    }
  else
    c_parser_skip_to_pragma_eol (parser);

  return c_finish_omp_master (loc, c_parser_omp_structured_block (parser,
								  if_p));
}
/* OpenMP 2.5:
   # pragma omp ordered new-line
     structured-block

   OpenMP 4.5:
   # pragma omp ordered ordered-clauses new-line
     structured-block

   # pragma omp ordered depend-clauses new-line

   Returns true iff a statement (the structured block) was parsed.  */

#define OMP_ORDERED_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREADS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMD))

#define OMP_ORDERED_DEPEND_CLAUSE_MASK				\
	(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)

static bool
c_parser_omp_ordered (c_parser *parser, enum pragma_context context,
		      bool *if_p)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);

  /* ordered is a statement-level construct only.  */
  if (context != pragma_stmt && context != pragma_compound)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);

      /* The stand-alone "ordered depend(...)" form takes no body.  */
      if (!strcmp ("depend", p))
	{
	  if (!flag_openmp)	/* flag_openmp_simd */
	    {
	      c_parser_skip_to_pragma_eol (parser, false);
	      return false;
	    }
	  if (context == pragma_stmt)
	    {
	      error_at (loc,
			"%<#pragma omp ordered%> with %<depend%> clause may "
			"only be used in compound statements");
	      c_parser_skip_to_pragma_eol (parser, false);
	      return false;
	    }

	  tree clauses
	    = c_parser_omp_all_clauses (parser,
					OMP_ORDERED_DEPEND_CLAUSE_MASK,
					"#pragma omp ordered");
	  c_finish_omp_ordered (loc, clauses, NULL_TREE);
	  return false;
	}
    }

  tree clauses = c_parser_omp_all_clauses (parser, OMP_ORDERED_CLAUSE_MASK,
					   "#pragma omp ordered");

  /* With -fopenmp-simd only ordered simd is honored.  */
  if (!flag_openmp	/* flag_openmp_simd  */
      && omp_find_clause (clauses, OMP_CLAUSE_SIMD) == NULL_TREE)
    return false;

  c_finish_omp_ordered (loc, clauses,
			c_parser_omp_structured_block (parser, if_p));
  return true;
}
/* OpenMP 2.5:
   section-scope:
     { section-sequence }

   section-sequence:
     section-directive[opt] structured-block
     section-sequence section-directive structured-block

   SECTIONS_LOC is the location of the #pragma omp sections.

   Parses the braced body of a sections construct, wrapping each
   structured block in an OMP_SECTION tree, and returns the resulting
   OMP_SECTIONS statement (or NULL_TREE when the opening brace is
   missing).  */

static tree
c_parser_omp_sections_scope (location_t sections_loc, c_parser *parser)
{
  tree stmt, substmt;
  /* Once a missing section directive has been diagnosed, suppress the
     same error for subsequent blocks until a directive is seen.  */
  bool error_suppress = false;
  location_t loc;

  loc = c_parser_peek_token (parser)->location;
  if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
    {
      /* Avoid skipping until the end of the block.  */
      parser->error = false;
      return NULL_TREE;
    }

  stmt = push_stmt_list ();

  /* The first block may omit its leading #pragma omp section.  */
  if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_SECTION)
    {
      substmt = c_parser_omp_structured_block (parser, NULL);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }

  while (1)
    {
      if (c_parser_next_token_is (parser, CPP_CLOSE_BRACE))
	break;
      if (c_parser_next_token_is (parser, CPP_EOF))
	break;

      loc = c_parser_peek_token (parser)->location;
      if (c_parser_peek_token (parser)->pragma_kind == PRAGMA_OMP_SECTION)
	{
	  c_parser_consume_pragma (parser);
	  c_parser_skip_to_pragma_eol (parser);
	  error_suppress = false;
	}
      else if (!error_suppress)
	{
	  error_at (loc, "expected %<#pragma omp section%> or %<}%>");
	  error_suppress = true;
	}

      substmt = c_parser_omp_structured_block (parser, NULL);
      substmt = build1 (OMP_SECTION, void_type_node, substmt);
      SET_EXPR_LOCATION (substmt, loc);
      add_stmt (substmt);
    }
  c_parser_skip_until_found (parser, CPP_CLOSE_BRACE,
			     "expected %<#pragma omp section%> or %<}%>");

  substmt = pop_stmt_list (stmt);

  stmt = make_node (OMP_SECTIONS);
  SET_EXPR_LOCATION (stmt, sections_loc);
  TREE_TYPE (stmt) = void_type_node;
  OMP_SECTIONS_BODY (stmt) = substmt;

  return add_stmt (stmt);
}
/* OpenMP 2.5:
   # pragma omp sections sections-clause[optseq] newline
     sections-scope

   LOC is the location of the #pragma token.  */

#define OMP_SECTIONS_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_sections (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses)
{
  strcat (p_name, " sections");
  mask |= OMP_SECTIONS_CLAUSE_MASK;
  /* nowait is not allowed when sections is part of a combined
     construct.  */
  if (cclauses)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT);

  tree sec_clauses
    = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_SECTIONS, mask, sec_clauses, cclauses);
      sec_clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS];
    }

  tree scope = c_begin_compound_stmt (true);
  tree sections_stmt = c_parser_omp_sections_scope (loc, parser);
  if (sections_stmt)
    OMP_SECTIONS_CLAUSES (sections_stmt) = sec_clauses;
  scope = c_end_compound_stmt (loc, scope, true);
  add_stmt (scope);

  return sections_stmt;
}
/* OpenMP 2.5:
# pragma omp parallel parallel-clause[optseq] new-line
structured-block
# pragma omp parallel for parallel-for-clause[optseq] new-line
structured-block
# pragma omp parallel sections parallel-sections-clause[optseq] new-line
structured-block
OpenMP 4.0:
# pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line
structured-block
LOC is the location of the #pragma token.
*/
#define OMP_PARALLEL_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND))
static tree
c_parser_omp_parallel (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses,
		       bool *if_p)
{
  tree stmt, clauses, block;

  /* Extend the directive name used in diagnostics and the clause mask;
     this function may itself be a leg of a combined construct (CCLAUSES
     non-NULL means an outer construct is collecting split clauses).  */
  strcat (p_name, " parallel");
  mask |= OMP_PARALLEL_CLAUSE_MASK;
  /* #pragma omp target parallel{, for, for simd} disallow copyin clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0
      && (mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN);

  /* Combined parallel for[ simd].  */
  if (c_parser_next_token_is_keyword (parser, RID_FOR))
    {
      tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];

      if (cclauses == NULL)
	cclauses = cclauses_buf;

      c_parser_consume_token (parser);
      if (!flag_openmp)  /* flag_openmp_simd  */
	return c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p);
      block = c_begin_omp_parallel ();
      tree ret = c_parser_omp_for (loc, parser, p_name, mask, cclauses, if_p);
      /* The parallel region opened above must be closed even if parsing
	 the nested construct failed (RET == NULL_TREE).  */
      stmt
	= c_finish_omp_parallel (loc, cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				 block);
      if (ret == NULL_TREE)
	return ret;
      OMP_PARALLEL_COMBINED (stmt) = 1;
      return stmt;
    }
  /* When combined with distribute, parallel has to be followed by for.
     #pragma omp target parallel is allowed though.  */
  else if (cclauses
	   && (mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
    {
      error_at (loc, "expected %<for%> after %qs", p_name);
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }
  else if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* Combined parallel master; only valid when not already nested in
	 another combined construct (CCLAUSES == NULL).  */
      if (cclauses == NULL && strcmp (p, "master") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_master (loc, parser, p_name, mask, cclauses,
					if_p);
	  block = c_begin_omp_parallel ();
	  tree ret = c_parser_omp_master (loc, parser, p_name, mask, cclauses,
					  if_p);
	  stmt = c_finish_omp_parallel (loc,
					cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
					block);
	  if (ret == NULL)
	    return ret;
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
      /* Combined parallel loop.  */
      else if (strcmp (p, "loop") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];

	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_omp_parallel ();
	  tree ret = c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
					if_p);
	  stmt
	    = c_finish_omp_parallel (loc,
				     cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
				     block);
	  if (ret == NULL_TREE)
	    return ret;
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
      else if (!flag_openmp)  /* flag_openmp_simd  */
	{
	  /* With -fopenmp-simd only, any other combination is ignored.  */
	  c_parser_skip_to_pragma_eol (parser, false);
	  return NULL_TREE;
	}
      /* Combined parallel sections.  */
      else if (cclauses == NULL && strcmp (p, "sections") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  block = c_begin_omp_parallel ();
	  c_parser_omp_sections (loc, parser, p_name, mask, cclauses);
	  stmt = c_finish_omp_parallel (loc,
					cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL],
					block);
	  OMP_PARALLEL_COMBINED (stmt) = 1;
	  return stmt;
	}
    }
  else if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  /* Plain #pragma omp parallel: parse clauses (splitting them between
     constructs if this is part of a combined directive), then the body.  */
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_PARALLEL, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
    }

  block = c_begin_omp_parallel ();
  c_parser_statement (parser, if_p);
  stmt = c_finish_omp_parallel (loc, clauses, block);

  return stmt;
}
/* OpenMP 2.5:
   # pragma omp single single-clause[optseq] new-line
     structured-block

   LOC is the location of the #pragma.  */

#define OMP_SINGLE_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_single (location_t loc, c_parser *parser, bool *if_p)
{
  /* Build the OMP_SINGLE node up front, then fill in clauses and body.  */
  tree stmt = make_node (OMP_SINGLE);
  SET_EXPR_LOCATION (stmt, loc);
  TREE_TYPE (stmt) = void_type_node;

  OMP_SINGLE_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK,
				"#pragma omp single");
  OMP_SINGLE_BODY (stmt) = c_parser_omp_structured_block (parser, if_p);

  return add_stmt (stmt);
}
/* OpenMP 3.0:
   # pragma omp task task-clause[optseq] new-line

   LOC is the location of the #pragma.  */

#define OMP_TASK_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))

static tree
c_parser_omp_task (location_t loc, c_parser *parser, bool *if_p)
{
  tree clauses, block;

  /* Clauses first, then the task body inside its own binding level.  */
  clauses = c_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK,
				      "#pragma omp task");

  block = c_begin_omp_task ();
  c_parser_statement (parser, if_p);
  return c_finish_omp_task (loc, clauses, block);
}
/* OpenMP 3.0:
   # pragma omp taskwait new-line

   OpenMP 5.0:
   # pragma omp taskwait taskwait-clause[optseq] new-line  */

#define OMP_TASKWAIT_CLAUSE_MASK					\
	(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)

/* Parse a taskwait directive.  A bare taskwait becomes a builtin call
   (c_finish_omp_taskwait); with a depend clause (OpenMP 5.0) it is
   represented as a body-less OMP_TASK carrying the clauses.  */

static void
c_parser_omp_taskwait (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);

  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TASKWAIT_CLAUSE_MASK,
				"#pragma omp taskwait");
  if (clauses)
    {
      tree stmt = make_node (OMP_TASK);
      /* TREE_TYPE must be a type node; use void_type_node like every
	 other statement node built in this file (void_node is the
	 VOID_CST value, not a type).  */
      TREE_TYPE (stmt) = void_type_node;
      OMP_TASK_CLAUSES (stmt) = clauses;
      OMP_TASK_BODY (stmt) = NULL_TREE;
      SET_EXPR_LOCATION (stmt, loc);
      add_stmt (stmt);
    }
  else
    c_finish_omp_taskwait (loc);
}
/* OpenMP 3.1:
   # pragma omp taskyield new-line

   taskyield is a stand-alone directive without clauses or a body.  */

static void
c_parser_omp_taskyield (c_parser *parser)
{
  /* Capture the directive location before the pragma token is eaten,
     then insist the pragma line ends immediately.  */
  const location_t pragma_loc = c_parser_peek_token (parser)->location;

  c_parser_consume_pragma (parser);
  c_parser_skip_to_pragma_eol (parser);

  c_finish_omp_taskyield (pragma_loc);
}
/* OpenMP 4.0:
   # pragma omp taskgroup new-line

   OpenMP 5.0:
   # pragma omp taskgroup taskgroup-clause[optseq] new-line  */

#define OMP_TASKGROUP_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASK_REDUCTION))

static tree
c_parser_omp_taskgroup (location_t loc, c_parser *parser, bool *if_p)
{
  /* Parse the (OpenMP 5.0) task_reduction clauses, then the body.  */
  tree clauses = c_parser_omp_all_clauses (parser, OMP_TASKGROUP_CLAUSE_MASK,
					   "#pragma omp taskgroup");

  tree body = c_parser_omp_structured_block (parser, if_p);
  return c_finish_omp_taskgroup (loc, body, clauses);
}
/* OpenMP 4.0:
   # pragma omp cancel cancel-clause[optseq] new-line

   LOC is the location of the #pragma.
   The first four entries are the construct-type-clause selecting which
   enclosing region is cancelled.  */

#define OMP_CANCEL_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))
static void
c_parser_omp_cancel (c_parser *parser)
{
  /* The diagnostic location is the pragma token itself, so record it
     before consuming the pragma.  */
  const location_t pragma_loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);

  /* Parse the construct-type and if clauses, then let semantic
     analysis emit the cancellation call.  */
  tree parsed_clauses
    = c_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK,
				"#pragma omp cancel");
  c_finish_omp_cancel (pragma_loc, parsed_clauses);
}
/* OpenMP 4.0:
   # pragma omp cancellation point cancelpt-clause[optseq] new-line

   LOC is the location of the #pragma.  */

#define OMP_CANCELLATION_POINT_CLAUSE_MASK			\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP))

static void
c_parser_omp_cancellation_point (c_parser *parser, enum pragma_context context)
{
  location_t loc = c_parser_peek_token (parser)->location;
  tree clauses;
  bool point_seen = false;

  c_parser_consume_pragma (parser);
  /* The directive is spelled "cancellation point"; require the second
     word before doing anything else.  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "point") == 0)
	{
	  c_parser_consume_token (parser);
	  point_seen = true;
	}
    }
  if (!point_seen)
    {
      c_parser_error (parser, "expected %<point%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }

  /* A stand-alone directive: only valid where a statement may appear
     inside a compound statement.  */
  if (context != pragma_compound)
    {
      if (context == pragma_stmt)
	error_at (loc,
		  "%<#pragma %s%> may only be used in compound statements",
		  "omp cancellation point");
      else
	c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser, false);
      return;
    }

  clauses
    = c_parser_omp_all_clauses (parser, OMP_CANCELLATION_POINT_CLAUSE_MASK,
				"#pragma omp cancellation point");

  c_finish_omp_cancellation_point (loc, clauses);
}
/* OpenMP 4.0:
   #pragma omp distribute distribute-clause[optseq] new-line
     for-loop  */

#define OMP_DISTRIBUTE_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE))

static tree
c_parser_omp_distribute (location_t loc, c_parser *parser,
			 char *p_name, omp_clause_mask mask, tree *cclauses,
			 bool *if_p)
{
  tree clauses, block, ret;

  strcat (p_name, " distribute");
  mask |= OMP_DISTRIBUTE_CLAUSE_MASK;

  /* Combined distribute simd / distribute parallel [for ...].  */
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      bool simd = false;
      bool parallel = false;

      if (strcmp (p, "simd") == 0)
	simd = true;
      else
	parallel = strcmp (p, "parallel") == 0;
      if (parallel || simd)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    {
	      if (simd)
		return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
					  if_p);
	      else
		return c_parser_omp_parallel (loc, parser, p_name, mask,
					      cclauses, if_p);
	    }
	  /* Parse the inner construct in its own scope, then wrap the
	     resulting statement list in the OMP_DISTRIBUTE node built
	     from the split distribute clauses.  */
	  block = c_begin_compound_stmt (true);
	  if (simd)
	    ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				     if_p);
	  else
	    ret = c_parser_omp_parallel (loc, parser, p_name, mask, cclauses,
					 if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  ret = make_node (OMP_DISTRIBUTE);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  /* Plain distribute: parse clauses (splitting if part of a combined
     construct) and the associated for-loop nest.  */
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
    }

  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_DISTRIBUTE, clauses, NULL,
			       if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);

  return ret;
}
/* OpenMP 4.0:
   # pragma omp teams teams-clause[optseq] new-line
     structured-block  */

#define OMP_TEAMS_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT))

static tree
c_parser_omp_teams (location_t loc, c_parser *parser,
		    char *p_name, omp_clause_mask mask, tree *cclauses,
		    bool *if_p)
{
  tree clauses, block, ret;

  strcat (p_name, " teams");
  mask |= OMP_TEAMS_CLAUSE_MASK;

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* Combined teams distribute [...].  */
      if (strcmp (p, "distribute") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_distribute (loc, parser, p_name, mask,
					    cclauses, if_p);
	  /* Parse the nested construct, then wrap it in an OMP_TEAMS
	     node carrying the teams part of the split clauses.  */
	  block = c_begin_omp_parallel ();
	  ret = c_parser_omp_distribute (loc, parser, p_name, mask, cclauses,
					 if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	  ret = make_node (OMP_TEAMS);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_TEAMS_CLAUSES (ret) = clauses;
	  OMP_TEAMS_BODY (ret) = block;
	  OMP_TEAMS_COMBINED (ret) = 1;
	  SET_EXPR_LOCATION (ret, loc);
	  return add_stmt (ret);
	}
      /* Combined teams loop.  */
      else if (strcmp (p, "loop") == 0)
	{
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;

	  c_parser_consume_token (parser);
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    return c_parser_omp_loop (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_omp_parallel ();
	  ret = c_parser_omp_loop (loc, parser, p_name, mask, cclauses, if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	  ret = make_node (OMP_TEAMS);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_TEAMS_CLAUSES (ret) = clauses;
	  OMP_TEAMS_BODY (ret) = block;
	  OMP_TEAMS_COMBINED (ret) = 1;
	  SET_EXPR_LOCATION (ret, loc);
	  return add_stmt (ret);
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  /* Plain teams construct with a structured block body.  */
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
    }
  tree stmt = make_node (OMP_TEAMS);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TEAMS_CLAUSES (stmt) = clauses;
  block = c_begin_omp_parallel ();
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  OMP_TEAMS_BODY (stmt) = c_end_compound_stmt (loc, block, true);
  SET_EXPR_LOCATION (stmt, loc);

  return add_stmt (stmt);
}
/* OpenMP 4.0:
   # pragma omp target data target-data-clause[optseq] new-line
     structured-block  */

#define OMP_TARGET_DATA_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_USE_DEVICE_PTR) \
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_USE_DEVICE_ADDR))

static tree
c_parser_omp_target_data (location_t loc, c_parser *parser, bool *if_p)
{
  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK,
				"#pragma omp target data");
  /* MAP_SEEN: 3 if at least one usable map/use_device_* clause was seen;
     bit 0 alone means only invalid map kinds were seen (already
     diagnosed), which suppresses the "must contain at least one"
     error below.  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_ALWAYS_TOFROM:
	  case GOMP_MAP_ALLOC:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    /* Artificial map kinds added by clause processing; neither
	       count towards map_seen nor are they invalid.  */
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target data%> with map-type other "
		      "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> "
		      "on %<map%> clause");
	    /* Unlink the erroneous clause and continue without
	       advancing PC.  */
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      else if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_PTR
	       || OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_USE_DEVICE_ADDR)
	map_seen = 3;
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }

  if (map_seen != 3)
    {
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target data%> must contain at least "
		  "one %<map%>, %<use_device_ptr%> or %<use_device_addr%> "
		  "clause");
      return NULL_TREE;
    }

  tree stmt = make_node (OMP_TARGET_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_DATA_CLAUSES (stmt) = clauses;
  keep_next_level ();
  tree block = c_begin_compound_stmt (true);
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  OMP_TARGET_DATA_BODY (stmt) = c_end_compound_stmt (loc, block, true);

  SET_EXPR_LOCATION (stmt, loc);
  return add_stmt (stmt);
}
/* OpenMP 4.0:
   # pragma omp target update target-update-clause[optseq] new-line  */

#define OMP_TARGET_UPDATE_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static bool
c_parser_omp_target_update (location_t loc, c_parser *parser,
			    enum pragma_context context)
{
  /* A stand-alone directive; not valid as the sole statement of e.g.
     an if body.  */
  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target update");
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }

  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK,
				"#pragma omp target update");
  /* The spec requires at least one motion clause.  */
  if (omp_find_clause (clauses, OMP_CLAUSE_TO) == NULL_TREE
      && omp_find_clause (clauses, OMP_CLAUSE_FROM) == NULL_TREE)
    {
      error_at (loc,
		"%<#pragma omp target update%> must contain at least one "
		"%<from%> or %<to%> clauses");
      return false;
    }

  tree stmt = make_node (OMP_TARGET_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  return false;
}
/* OpenMP 4.5:
   # pragma omp target enter data target-data-clause[optseq] new-line  */

#define OMP_TARGET_ENTER_DATA_CLAUSE_MASK			\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_target_enter_data (location_t loc, c_parser *parser,
				enum pragma_context context)
{
  /* The caller has consumed "enter"; the next token must be "data".  */
  bool data_seen = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  data_seen = true;
	}
    }
  if (!data_seen)
    {
      c_parser_error (parser, "expected %<data%>");
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }

  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target enter data");
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_ENTER_DATA_CLAUSE_MASK,
				"#pragma omp target enter data");
  /* Same map_seen protocol as target data: 3 = valid map seen, bit 0
     alone = only invalid (already diagnosed) map kinds.  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_ALLOC:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target enter data%> with map-type other "
		      "than %<to%> or %<alloc%> on %<map%> clause");
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }

  if (map_seen != 3)
    {
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target enter data%> must contain at least "
		  "one %<map%> clause");
      return NULL_TREE;
    }

  tree stmt = make_node (OMP_TARGET_ENTER_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_ENTER_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 4.5:
   # pragma omp target exit data target-data-clause[optseq] new-line  */

#define OMP_TARGET_EXIT_DATA_CLAUSE_MASK			\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT))

static tree
c_parser_omp_target_exit_data (location_t loc, c_parser *parser,
			       enum pragma_context context)
{
  /* The caller has consumed "exit"; the next token must be "data".  */
  bool data_seen = false;
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  data_seen = true;
	}
    }
  if (!data_seen)
    {
      c_parser_error (parser, "expected %<data%>");
      c_parser_skip_to_pragma_eol (parser);
      return NULL_TREE;
    }

  if (context == pragma_stmt)
    {
      error_at (loc, "%<#pragma %s%> may only be used in compound statements",
		"omp target exit data");
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }

  tree clauses
    = c_parser_omp_all_clauses (parser, OMP_TARGET_EXIT_DATA_CLAUSE_MASK,
				"#pragma omp target exit data");
  /* Same map_seen protocol as target data: 3 = valid map seen, bit 0
     alone = only invalid (already diagnosed) map kinds.  */
  int map_seen = 0;
  for (tree *pc = &clauses; *pc;)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_RELEASE:
	  case GOMP_MAP_DELETE:
	    map_seen = 3;
	    break;
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    break;
	  default:
	    map_seen |= 1;
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target exit data%> with map-type other "
		      "than %<from%>, %<release%> or %<delete%> on %<map%>"
		      " clause");
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }

  if (map_seen != 3)
    {
      if (map_seen == 0)
	error_at (loc,
		  "%<#pragma omp target exit data%> must contain at least one "
		  "%<map%> clause");
      return NULL_TREE;
    }

  tree stmt = make_node (OMP_TARGET_EXIT_DATA);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_EXIT_DATA_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);
  return stmt;
}
/* OpenMP 4.0:
   # pragma omp target target-clause[optseq] new-line
     structured-block

   Also dispatches the target data / enter data / exit data / update
   variants and the combined target teams/parallel/simd constructs.  */

#define OMP_TARGET_CLAUSE_MASK					\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)		\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULTMAP)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IS_DEVICE_PTR))

static bool
c_parser_omp_target (c_parser *parser, enum pragma_context context, bool *if_p)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);
  tree *pc = NULL, stmt, block;

  if (context != pragma_stmt && context != pragma_compound)
    {
      c_parser_error (parser, "expected declaration specifiers");
      c_parser_skip_to_pragma_eol (parser);
      return false;
    }

  /* Record that a target construct was seen, for the sake of the
     "requires" directive diagnostics.  */
  if (flag_openmp)
    omp_requires_mask
      = (enum omp_requires) (omp_requires_mask | OMP_REQUIRES_TARGET_USED);

  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      enum tree_code ccode = ERROR_MARK;

      /* Combined target teams / target parallel / target simd.  */
      if (strcmp (p, "teams") == 0)
	ccode = OMP_TEAMS;
      else if (strcmp (p, "parallel") == 0)
	ccode = OMP_PARALLEL;
      else if (strcmp (p, "simd") == 0)
	ccode = OMP_SIMD;
      if (ccode != ERROR_MARK)
	{
	  tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT];
	  /* P_NAME must be large enough for the longest combined
	     directive name built up by the nested parsers.  */
	  char p_name[sizeof ("#pragma omp target teams distribute "
			      "parallel for simd")];

	  c_parser_consume_token (parser);
	  strcpy (p_name, "#pragma omp target");
	  if (!flag_openmp)  /* flag_openmp_simd  */
	    {
	      tree stmt;
	      switch (ccode)
		{
		case OMP_TEAMS:
		  stmt = c_parser_omp_teams (loc, parser, p_name,
					     OMP_TARGET_CLAUSE_MASK,
					     cclauses, if_p);
		  break;
		case OMP_PARALLEL:
		  stmt = c_parser_omp_parallel (loc, parser, p_name,
						OMP_TARGET_CLAUSE_MASK,
						cclauses, if_p);
		  break;
		case OMP_SIMD:
		  stmt = c_parser_omp_simd (loc, parser, p_name,
					    OMP_TARGET_CLAUSE_MASK,
					    cclauses, if_p);
		  break;
		default:
		  gcc_unreachable ();
		}
	      return stmt != NULL_TREE;
	    }
	  /* Parse the nested construct in its own scope; its split
	     clauses come back through CCLAUSES.  */
	  keep_next_level ();
	  tree block = c_begin_compound_stmt (true), ret;
	  switch (ccode)
	    {
	    case OMP_TEAMS:
	      ret = c_parser_omp_teams (loc, parser, p_name,
					OMP_TARGET_CLAUSE_MASK, cclauses,
					if_p);
	      break;
	    case OMP_PARALLEL:
	      ret = c_parser_omp_parallel (loc, parser, p_name,
					   OMP_TARGET_CLAUSE_MASK, cclauses,
					   if_p);
	      break;
	    case OMP_SIMD:
	      ret = c_parser_omp_simd (loc, parser, p_name,
				       OMP_TARGET_CLAUSE_MASK, cclauses,
				       if_p);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL_TREE)
	    return false;
	  if (ccode == OMP_TEAMS)
	    {
	      /* For combined target teams, ensure the num_teams and
		 thread_limit clause expressions are evaluated on the host,
		 before entering the target construct.  */
	      tree c;
	      for (c = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
		   c; c = OMP_CLAUSE_CHAIN (c))
		if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NUM_TEAMS
		     || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREAD_LIMIT)
		    && TREE_CODE (OMP_CLAUSE_OPERAND (c, 0)) != INTEGER_CST)
		  {
		    /* Evaluate into a temporary on the host and map the
		       temporary firstprivate into the target region.  */
		    tree expr = OMP_CLAUSE_OPERAND (c, 0);
		    tree tmp = create_tmp_var_raw (TREE_TYPE (expr));
		    expr = build4 (TARGET_EXPR, TREE_TYPE (expr), tmp,
				   expr, NULL_TREE, NULL_TREE);
		    add_stmt (expr);
		    OMP_CLAUSE_OPERAND (c, 0) = expr;
		    tree tc = build_omp_clause (OMP_CLAUSE_LOCATION (c),
						OMP_CLAUSE_FIRSTPRIVATE);
		    OMP_CLAUSE_DECL (tc) = tmp;
		    OMP_CLAUSE_CHAIN (tc)
		      = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		    cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = tc;
		  }
	    }
	  tree stmt = make_node (OMP_TARGET);
	  TREE_TYPE (stmt) = void_type_node;
	  OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	  OMP_TARGET_BODY (stmt) = block;
	  OMP_TARGET_COMBINED (stmt) = 1;
	  SET_EXPR_LOCATION (stmt, loc);
	  add_stmt (stmt);
	  pc = &OMP_TARGET_CLAUSES (stmt);
	  goto check_clauses;
	}
      else if (!flag_openmp)  /* flag_openmp_simd  */
	{
	  c_parser_skip_to_pragma_eol (parser, false);
	  return false;
	}
      /* The data-management variants are handled by their own
	 parsers.  */
      else if (strcmp (p, "data") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_data (loc, parser, if_p);
	  return true;
	}
      else if (strcmp (p, "enter") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_enter_data (loc, parser, context);
	  return false;
	}
      else if (strcmp (p, "exit") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_target_exit_data (loc, parser, context);
	  return false;
	}
      else if (strcmp (p, "update") == 0)
	{
	  c_parser_consume_token (parser);
	  return c_parser_omp_target_update (loc, parser, context);
	}
    }
  if (!flag_openmp)  /* flag_openmp_simd  */
    {
      c_parser_skip_to_pragma_eol (parser, false);
      return false;
    }

  /* Plain #pragma omp target with a structured block.  */
  stmt = make_node (OMP_TARGET);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_CLAUSES (stmt)
    = c_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK,
				"#pragma omp target");
  pc = &OMP_TARGET_CLAUSES (stmt);
  keep_next_level ();
  block = c_begin_compound_stmt (true);
  add_stmt (c_parser_omp_structured_block (parser, if_p));
  OMP_TARGET_BODY (stmt) = c_end_compound_stmt (loc, block, true);

  SET_EXPR_LOCATION (stmt, loc);
  add_stmt (stmt);

check_clauses:
  /* Diagnose and remove map clauses with kinds not valid on target;
     shared by the plain and combined paths via PC.  */
  while (*pc)
    {
      if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_MAP)
	switch (OMP_CLAUSE_MAP_KIND (*pc))
	  {
	  case GOMP_MAP_TO:
	  case GOMP_MAP_ALWAYS_TO:
	  case GOMP_MAP_FROM:
	  case GOMP_MAP_ALWAYS_FROM:
	  case GOMP_MAP_TOFROM:
	  case GOMP_MAP_ALWAYS_TOFROM:
	  case GOMP_MAP_ALLOC:
	  case GOMP_MAP_FIRSTPRIVATE_POINTER:
	  case GOMP_MAP_ALWAYS_POINTER:
	    break;
	  default:
	    error_at (OMP_CLAUSE_LOCATION (*pc),
		      "%<#pragma omp target%> with map-type other "
		      "than %<to%>, %<from%>, %<tofrom%> or %<alloc%> "
		      "on %<map%> clause");
	    *pc = OMP_CLAUSE_CHAIN (*pc);
	    continue;
	  }
      pc = &OMP_CLAUSE_CHAIN (*pc);
    }
  cfun->has_omp_target = true;
  return true;
}
/* OpenMP 4.0:
   # pragma omp declare simd declare-simd-clauses[optseq] new-line

   OpenMP 5.0:
   # pragma omp declare variant (identifier) match(context-selector) new-line
   */

#define OMP_DECLARE_SIMD_CLAUSE_MASK				\
	( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH)	\
	| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH))

/* The clauses cannot be analyzed until the function declaration they
   apply to has been parsed, so buffer the raw clause tokens, parse the
   following declaration/definition, and let that parse re-lex the
   buffered tokens.  */

static void
c_parser_omp_declare_simd (c_parser *parser, enum pragma_context context)
{
  /* KIND is the identifier after "declare": "simd" or "variant".  */
  c_token *token = c_parser_peek_token (parser);
  gcc_assert (token->type == CPP_NAME);
  tree kind = token->value;
  gcc_assert (strcmp (IDENTIFIER_POINTER (kind), "simd") == 0
	      || strcmp (IDENTIFIER_POINTER (kind), "variant") == 0);

  /* Buffer every token up to (and including) the pragma EOL.  */
  auto_vec<c_token> clauses;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      c_token *token = c_parser_peek_token (parser);
      if (token->type == CPP_EOF)
	{
	  c_parser_skip_to_pragma_eol (parser);
	  return;
	}
      clauses.safe_push (*token);
      c_parser_consume_token (parser);
    }
  clauses.safe_push (*c_parser_peek_token (parser));
  c_parser_skip_to_pragma_eol (parser);

  /* Multiple consecutive declare simd/variant pragmas may precede one
     declaration; buffer them all, requiring each to repeat KIND.  */
  while (c_parser_next_token_is (parser, CPP_PRAGMA))
    {
      if (c_parser_peek_token (parser)->pragma_kind != PRAGMA_OMP_DECLARE
	  || c_parser_peek_2nd_token (parser)->type != CPP_NAME
	  || c_parser_peek_2nd_token (parser)->value != kind)
	{
	  error ("%<#pragma omp declare %s%> must be followed by "
		 "function declaration or definition or another "
		 "%<#pragma omp declare %s%>",
		 IDENTIFIER_POINTER (kind), IDENTIFIER_POINTER (kind));
	  return;
	}
      c_parser_consume_pragma (parser);
      while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF)
	    {
	      c_parser_skip_to_pragma_eol (parser);
	      return;
	    }
	  clauses.safe_push (*token);
	  c_parser_consume_token (parser);
	}
      clauses.safe_push (*c_parser_peek_token (parser));
      c_parser_skip_to_pragma_eol (parser);
    }

  /* Make sure nothing tries to read past the end of the tokens.  */
  c_token eof_token;
  memset (&eof_token, 0, sizeof (eof_token));
  eof_token.type = CPP_EOF;
  clauses.safe_push (eof_token);
  clauses.safe_push (eof_token);

  switch (context)
    {
    case pragma_external:
      /* At file scope, __extension__ keywords may precede the
	 declaration; consume them with extension diagnostics off.  */
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  c_parser_declaration_or_fndef (parser, true, true, true, false, true,
					 NULL, clauses);
	  restore_extension_diagnostics (ext);
	}
      else
	c_parser_declaration_or_fndef (parser, true, true, true, false, true,
				       NULL, clauses);
      break;
    case pragma_struct:
    case pragma_param:
    case pragma_stmt:
      error ("%<#pragma omp declare %s%> must be followed by "
	     "function declaration or definition",
	     IDENTIFIER_POINTER (kind));
      break;
    case pragma_compound:
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  && c_parser_peek_token (parser)->keyword == RID_EXTENSION)
	{
	  int ext = disable_extension_diagnostics ();
	  do
	    c_parser_consume_token (parser);
	  while (c_parser_next_token_is (parser, CPP_KEYWORD)
		 && c_parser_peek_token (parser)->keyword == RID_EXTENSION);
	  if (c_parser_next_tokens_start_declaration (parser))
	    {
	      c_parser_declaration_or_fndef (parser, true, true, true, true,
					     true, NULL, clauses);
	      restore_extension_diagnostics (ext);
	      break;
	    }
	  restore_extension_diagnostics (ext);
	}
      else if (c_parser_next_tokens_start_declaration (parser))
	{
	  c_parser_declaration_or_fndef (parser, true, true, true, true, true,
					 NULL, clauses);
	  break;
	}
      error ("%<#pragma omp declare %s%> must be followed by "
	     "function declaration or definition",
	     IDENTIFIER_POINTER (kind));
      break;
    default:
      gcc_unreachable ();
    }
}
/* Valid trait-selector names for each OpenMP 5.0 context-selector set,
   each array terminated by NULL.  */
static const char *const omp_construct_selectors[] = {
  "simd", "target", "teams", "parallel", "for", NULL };
static const char *const omp_device_selectors[] = {
  "kind", "isa", "arch", NULL };
static const char *const omp_implementation_selectors[] = {
  "vendor", "extension", "atomic_default_mem_order", "unified_address",
  "unified_shared_memory", "dynamic_allocators", "reverse_offload", NULL };
static const char *const omp_user_selectors[] = {
  "condition", NULL };
/* OpenMP 5.0:
   trait-selector:
     trait-selector-name[([trait-score:]trait-property[,trait-property[,...]])]

   trait-score:
     score(score-expression)

   Parse the comma separated list of trait-selectors belonging to the
   trait-set SET (an identifier: "construct", "device", "implementation"
   or "user").  PARMS is the parameter list of the function the
   directive applies to; it is only consulted for the "simd" selector,
   whose properties are declare-simd clauses, and is NULL_TREE when such
   properties are not allowed (metadirective).  Returns a TREE_LIST of
   (selector, properties) pairs in source order, or error_mark_node on a
   parse error.  */
static tree
c_parser_omp_context_selector (c_parser *parser, tree set, tree parms)
{
  tree ret = NULL_TREE;
  do
    {
      tree selector;
      if (c_parser_next_token_is (parser, CPP_KEYWORD)
	  || c_parser_next_token_is (parser, CPP_NAME))
	selector = c_parser_peek_token (parser)->value;
      else
	{
	  c_parser_error (parser, "expected trait selector name");
	  return error_mark_node;
	}
      tree properties = NULL_TREE;
      const char *const *selectors = NULL;
      bool allow_score = true;
      bool allow_user = false;
      int property_limit = 0;
      /* What shape of property list the matched selector accepts.  */
      enum { CTX_PROPERTY_NONE, CTX_PROPERTY_USER, CTX_PROPERTY_NAME_LIST,
	     CTX_PROPERTY_ID, CTX_PROPERTY_EXPR,
	     CTX_PROPERTY_SIMD } property_kind = CTX_PROPERTY_NONE;
      /* Dispatch on the trait-set name; the first character uniquely
	 identifies each of the four sets.  */
      switch (IDENTIFIER_POINTER (set)[0])
	{
	case 'c': /* construct */
	  selectors = omp_construct_selectors;
	  allow_score = false;
	  property_limit = 1;
	  property_kind = CTX_PROPERTY_SIMD;
	  break;
	case 'd': /* device */
	  selectors = omp_device_selectors;
	  allow_score = false;
	  allow_user = true;
	  property_limit = 3;
	  property_kind = CTX_PROPERTY_NAME_LIST;
	  break;
	case 'i': /* implementation */
	  selectors = omp_implementation_selectors;
	  allow_user = true;
	  property_limit = 3;
	  property_kind = CTX_PROPERTY_NAME_LIST;
	  break;
	case 'u': /* user */
	  selectors = omp_user_selectors;
	  property_limit = 1;
	  property_kind = CTX_PROPERTY_EXPR;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Look the selector name up in the set's table.  Table entries at
	 index >= property_limit take no properties; an unknown name is
	 only accepted in sets that allow user-defined selectors.  */
      for (int i = 0; ; i++)
	{
	  if (selectors[i] == NULL)
	    {
	      if (allow_user)
		{
		  property_kind = CTX_PROPERTY_USER;
		  break;
		}
	      else
		{
		  error_at (c_parser_peek_token (parser)->location,
			    "selector %qs not allowed for context selector "
			    "set %qs", IDENTIFIER_POINTER (selector),
			    IDENTIFIER_POINTER (set));
		  c_parser_consume_token (parser);
		  return error_mark_node;
		}
	    }
	  if (i == property_limit)
	    property_kind = CTX_PROPERTY_NONE;
	  if (strcmp (selectors[i], IDENTIFIER_POINTER (selector)) == 0)
	    break;
	}
      /* atomic_default_mem_order takes a single identifier rather than
	 a name-list.  */
      if (property_kind == CTX_PROPERTY_NAME_LIST
	  && IDENTIFIER_POINTER (set)[0] == 'i'
	  && strcmp (IDENTIFIER_POINTER (selector),
		     "atomic_default_mem_order") == 0)
	property_kind = CTX_PROPERTY_ID;
      c_parser_consume_token (parser);
      if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
	{
	  if (property_kind == CTX_PROPERTY_NONE)
	    {
	      error_at (c_parser_peek_token (parser)->location,
			"selector %qs does not accept any properties",
			IDENTIFIER_POINTER (selector));
	      return error_mark_node;
	    }
	  matching_parens parens;
	  parens.require_open (parser);
	  c_token *token = c_parser_peek_token (parser);
	  /* Optional score(<const-expr>) : prefix before the properties.  */
	  if (allow_score
	      && c_parser_next_token_is (parser, CPP_NAME)
	      && strcmp (IDENTIFIER_POINTER (token->value), "score") == 0
	      && c_parser_peek_2nd_token (parser)->type == CPP_OPEN_PAREN)
	    {
	      c_parser_consume_token (parser);
	      matching_parens parens2;
	      parens2.require_open (parser);
	      tree score = c_parser_expr_no_commas (parser, NULL).value;
	      parens2.skip_until_found_close (parser);
	      c_parser_require (parser, CPP_COLON, "expected %<:%>");
	      if (score != error_mark_node)
		{
		  mark_exp_read (score);
		  score = c_fully_fold (score, false, NULL);
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (score))
		      || TREE_CODE (score) != INTEGER_CST)
		    error_at (token->location, "score argument must be "
			      "constant integer expression");
		  else if (tree_int_cst_sgn (score) < 0)
		    error_at (token->location, "score argument must be "
			      "non-negative");
		  else
		    /* The score is recorded as an extra property whose
		       purpose is the reserved identifier " score"; the
		       leading space cannot clash with a real name.  */
		    properties = tree_cons (get_identifier (" score"),
					    score, properties);
		}
	      token = c_parser_peek_token (parser);
	    }
	  switch (property_kind)
	    {
	      tree t;
	    case CTX_PROPERTY_USER:
	      /* User-defined selector: comma separated list of string
		 literals or constant integer expressions.  */
	      do
		{
		  t = c_parser_expr_no_commas (parser, NULL).value;
		  if (TREE_CODE (t) == STRING_CST)
		    properties = tree_cons (NULL_TREE, t, properties);
		  else if (t != error_mark_node)
		    {
		      mark_exp_read (t);
		      t = c_fully_fold (t, false, NULL);
		      if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
			  || !tree_fits_shwi_p (t))
			error_at (token->location, "property must be "
				  "constant integer expression or string "
				  "literal");
		      else
			properties = tree_cons (NULL_TREE, t, properties);
		    }
		  else
		    return error_mark_node;
		  if (c_parser_next_token_is (parser, CPP_COMMA))
		    c_parser_consume_token (parser);
		  else
		    break;
		}
	      while (1);
	      break;
	    case CTX_PROPERTY_ID:
	      /* Exactly one identifier or keyword.  */
	      if (c_parser_next_token_is (parser, CPP_KEYWORD)
		  || c_parser_next_token_is (parser, CPP_NAME))
		{
		  tree prop = c_parser_peek_token (parser)->value;
		  c_parser_consume_token (parser);
		  properties = tree_cons (prop, NULL_TREE, properties);
		}
	      else
		{
		  c_parser_error (parser, "expected identifier");
		  return error_mark_node;
		}
	      break;
	    case CTX_PROPERTY_NAME_LIST:
	      /* Comma separated identifiers or string literals;
		 identifiers are stored in TREE_PURPOSE, strings in
		 TREE_VALUE.  */
	      do
		{
		  tree prop = NULL_TREE, value = NULL_TREE;
		  if (c_parser_next_token_is (parser, CPP_KEYWORD)
		      || c_parser_next_token_is (parser, CPP_NAME))
		    {
		      prop = c_parser_peek_token (parser)->value;
		      c_parser_consume_token (parser);
		    }
		  else if (c_parser_next_token_is (parser, CPP_STRING))
		    value = c_parser_string_literal (parser, false,
						     false).value;
		  else
		    {
		      c_parser_error (parser, "expected identifier or "
					      "string literal");
		      return error_mark_node;
		    }
		  properties = tree_cons (prop, value, properties);
		  if (c_parser_next_token_is (parser, CPP_COMMA))
		    c_parser_consume_token (parser);
		  else
		    break;
		}
	      while (1);
	      break;
	    case CTX_PROPERTY_EXPR:
	      /* A single constant integer expression.  */
	      t = c_parser_expr_no_commas (parser, NULL).value;
	      if (t != error_mark_node)
		{
		  mark_exp_read (t);
		  t = c_fully_fold (t, false, NULL);
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (t))
		      || !tree_fits_shwi_p (t))
		    error_at (token->location, "property must be "
			      "constant integer expression");
		  else
		    properties = tree_cons (NULL_TREE, t, properties);
		}
	      else
		return error_mark_node;
	      break;
	    case CTX_PROPERTY_SIMD:
	      /* The simd selector's properties are declare-simd clauses,
		 converted to the numeric form used in attributes.  */
	      if (parms == NULL_TREE)
		{
		  error_at (token->location, "properties for %<simd%> "
			    "selector may not be specified in "
			    "%<metadirective%>");
		  return error_mark_node;
		}
	      tree c;
	      c = c_parser_omp_all_clauses (parser,
					    OMP_DECLARE_SIMD_CLAUSE_MASK,
					    "simd", true, 2);
	      c = c_omp_declare_simd_clauses_to_numbers (parms
							 == error_mark_node
							 ? NULL_TREE : parms,
							 c);
	      properties = c;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  parens.skip_until_found_close (parser);
	  properties = nreverse (properties);
	}
      else if (property_kind == CTX_PROPERTY_NAME_LIST
	       || property_kind == CTX_PROPERTY_ID
	       || property_kind == CTX_PROPERTY_EXPR)
	{
	  /* These selector kinds require a parenthesized property list;
	     emit the diagnostic via c_parser_require.  */
	  c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>");
	  return error_mark_node;
	}
      ret = tree_cons (selector, properties, ret);
      if (c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      else
	break;
    }
  while (1);
  return nreverse (ret);
}
/* OpenMP 5.0:
   trait-set-selector[,trait-set-selector[,...]]

   trait-set-selector:
     trait-set-selector-name = { trait-selector[, trait-selector[, ...]] }

   trait-set-selector-name:
     constructor
     device
     implementation
     user

   Parse the full context selector of a "match" clause.  PARMS is
   forwarded to c_parser_omp_context_selector.  Returns a TREE_LIST of
   (set, selectors) pairs in source order, or error_mark_node.  */
static tree
c_parser_omp_context_selector_specification (c_parser *parser, tree parms)
{
  tree ret = NULL_TREE;
  while (true)
    {
      const char *setp = "";
      if (c_parser_next_token_is (parser, CPP_NAME))
	setp = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      /* Only the four known trait-set names are accepted.  */
      if (strcmp (setp, "construct") != 0
	  && strcmp (setp, "device") != 0
	  && strcmp (setp, "implementation") != 0
	  && strcmp (setp, "user") != 0)
	{
	  c_parser_error (parser, "expected %<construct%>, %<device%>, "
				  "%<implementation%> or %<user%>");
	  return error_mark_node;
	}
      tree set = c_parser_peek_token (parser)->value;
      c_parser_consume_token (parser);
      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
	return error_mark_node;
      matching_braces braces;
      if (!braces.require_open (parser))
	return error_mark_node;
      /* Parse the brace-enclosed selector list for this set.  A bad
	 set poisons the whole result, but parsing continues so that
	 further diagnostics can still be emitted.  */
      tree selectors = c_parser_omp_context_selector (parser, set, parms);
      if (selectors == error_mark_node)
	ret = error_mark_node;
      else if (ret != error_mark_node)
	ret = tree_cons (set, selectors, ret);
      braces.skip_until_found_close (parser);
      if (!c_parser_next_token_is (parser, CPP_COMMA))
	break;
      c_parser_consume_token (parser);
    }
  if (ret == error_mark_node)
    return ret;
  /* Sets were consed in reverse; restore source order.  */
  return nreverse (ret);
}
/* Finalize #pragma omp declare variant after FNDECL has been parsed, and put
   that into "omp declare variant base" attribute.

   Parses "(variant-name) match (context-selector)" from the replayed
   clause tokens, validates the variant against the base function
   FNDECL, and if everything checks out records the variant in FNDECL's
   attributes.  PARMS is the base function's parameter list (or
   error_mark_node when unavailable).  */
static void
c_finish_omp_declare_variant (c_parser *parser, tree fndecl, tree parms)
{
  matching_parens parens;
  if (!parens.require_open (parser))
    {
     fail:
      /* Common error exit: discard the rest of the pragma line.  */
      c_parser_skip_to_pragma_eol (parser, false);
      return;
    }
  if (c_parser_next_token_is_not (parser, CPP_NAME)
      || c_parser_peek_token (parser)->id_kind != C_ID_ID)
    {
      c_parser_error (parser, "expected identifier");
      goto fail;
    }
  c_token *token = c_parser_peek_token (parser);
  tree variant = lookup_name (token->value);
  if (variant == NULL_TREE)
    {
      undeclared_variable (token->location, token->value);
      variant = error_mark_node;
    }
  c_parser_consume_token (parser);
  parens.require_close (parser);
  /* The variant name must be followed by a "match" clause.  */
  const char *clause = "";
  location_t match_loc = c_parser_peek_token (parser)->location;
  if (c_parser_next_token_is (parser, CPP_NAME))
    clause = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
  if (strcmp (clause, "match"))
    {
      c_parser_error (parser, "expected %<match%>");
      goto fail;
    }
  c_parser_consume_token (parser);
  if (!parens.require_open (parser))
    goto fail;
  if (parms == NULL_TREE)
    parms = error_mark_node;
  tree ctx = c_parser_omp_context_selector_specification (parser, parms);
  if (ctx == error_mark_node)
    goto fail;
  ctx = c_omp_check_context_selector (match_loc, ctx);
  if (ctx != error_mark_node && variant != error_mark_node)
    {
      if (TREE_CODE (variant) != FUNCTION_DECL)
	{
	  error_at (token->location, "variant %qD is not a function", variant);
	  variant = error_mark_node;
	}
      /* Type compatibility is not required when the context selector
	 contains the construct-set "simd" selector.  */
      else if (omp_get_context_selector (ctx, "construct", "simd") == NULL_TREE
	       && !comptypes (TREE_TYPE (fndecl), TREE_TYPE (variant)))
	{
	  error_at (token->location, "variant %qD and base %qD have "
		    "incompatible types", variant, fndecl);
	  variant = error_mark_node;
	}
      /* Built-ins cannot serve as variants.  */
      else if (fndecl_built_in_p (variant)
	       && (strncmp (IDENTIFIER_POINTER (DECL_NAME (variant)),
			    "__builtin_", strlen ("__builtin_")) == 0
		   || strncmp (IDENTIFIER_POINTER (DECL_NAME (variant)),
			       "__sync_", strlen ("__sync_")) == 0
		   || strncmp (IDENTIFIER_POINTER (DECL_NAME (variant)),
			       "__atomic_", strlen ("__atomic_")) == 0))
	{
	  error_at (token->location, "variant %qD is a built-in", variant);
	  variant = error_mark_node;
	}
      if (variant != error_mark_node)
	{
	  C_DECL_USED (variant) = 1;
	  tree construct = omp_get_context_selector (ctx, "construct", NULL);
	  c_omp_mark_declare_variant (match_loc, variant, construct);
	  /* Only record the variant when the selector can actually match
	     in this compilation.  */
	  if (omp_context_selector_matches (ctx))
	    {
	      tree attr
		= tree_cons (get_identifier ("omp declare variant base"),
			     build_tree_list (variant, ctx),
			     DECL_ATTRIBUTES (fndecl));
	      DECL_ATTRIBUTES (fndecl) = attr;
	    }
	}
    }
  parens.require_close (parser);
  c_parser_skip_to_pragma_eol (parser);
}
/* Finalize #pragma omp declare simd or #pragma omp declare variant
   clauses after FNDECL has been parsed, and put that into "omp declare simd"
   or "omp declare variant base" attribute.

   CLAUSES is the saved token copy of the directive's clauses collected
   by c_parser_omp_declare_simd; the tokens are replayed here through
   the parser.  clauses[0].type doubles as a state flag: CPP_EOF means
   an error has already been reported, CPP_PRAGMA that the tokens have
   already been processed.  PARMS is the parameter list to map clause
   arguments against, or NULL_TREE to use DECL_ARGUMENTS (fndecl).  */
static void
c_finish_omp_declare_simd (c_parser *parser, tree fndecl, tree parms,
			   vec<c_token> clauses)
{
  /* Normally first token is CPP_NAME "simd" or "variant".  CPP_EOF there
     indicates error has been reported and CPP_PRAGMA that
     c_finish_omp_declare_simd has already processed the tokens.  */
  if (clauses.exists () && clauses[0].type == CPP_EOF)
    return;
  const char *kind = "simd";
  if (clauses.exists ()
      && (clauses[0].type == CPP_NAME || clauses[0].type == CPP_PRAGMA))
    kind = IDENTIFIER_POINTER (clauses[0].value);
  gcc_assert (strcmp (kind, "simd") == 0 || strcmp (kind, "variant") == 0);
  if (fndecl == NULL_TREE || TREE_CODE (fndecl) != FUNCTION_DECL)
    {
      error ("%<#pragma omp declare %s%> not immediately followed by "
	     "a function declaration or definition", kind);
      /* CLAUSES may be an empty vector here (see the exists () checks
	 above); indexing clauses[0] unconditionally would be out of
	 bounds.  Guard the write like every other clauses[0] access in
	 this function.  */
      if (clauses.exists ())
	clauses[0].type = CPP_EOF;
      return;
    }
  if (clauses.exists () && clauses[0].type != CPP_NAME)
    {
      error_at (DECL_SOURCE_LOCATION (fndecl),
		"%<#pragma omp declare %s%> not immediately followed by "
		"a single function declaration or definition", kind);
      clauses[0].type = CPP_EOF;
      return;
    }
  if (parms == NULL_TREE)
    parms = DECL_ARGUMENTS (fndecl);
  /* Temporarily point the parser at the saved clause tokens.  */
  unsigned int tokens_avail = parser->tokens_avail;
  gcc_assert (parser->tokens == &parser->tokens_buf[0]);
  parser->tokens = clauses.address ();
  parser->tokens_avail = clauses.length ();
  /* c_parser_omp_declare_simd pushed 2 extra CPP_EOF tokens at the end.  */
  while (parser->tokens_avail > 3)
    {
      c_token *token = c_parser_peek_token (parser);
      gcc_assert (token->type == CPP_NAME
		  && strcmp (IDENTIFIER_POINTER (token->value), kind) == 0);
      c_parser_consume_token (parser);
      parser->in_pragma = true;
      if (strcmp (kind, "simd") == 0)
	{
	  /* Parse one "declare simd" clause set and chain it onto the
	     function's attributes.  */
	  tree c;
	  c = c_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK,
					"#pragma omp declare simd");
	  c = c_omp_declare_simd_clauses_to_numbers (parms, c);
	  if (c != NULL_TREE)
	    c = tree_cons (NULL_TREE, c, NULL_TREE);
	  c = build_tree_list (get_identifier ("omp declare simd"), c);
	  TREE_CHAIN (c) = DECL_ATTRIBUTES (fndecl);
	  DECL_ATTRIBUTES (fndecl) = c;
	}
      else
	{
	  gcc_assert (strcmp (kind, "variant") == 0);
	  c_finish_omp_declare_variant (parser, fndecl, parms);
	}
    }
  /* Restore the real token stream and mark the tokens as processed.  */
  parser->tokens = &parser->tokens_buf[0];
  parser->tokens_avail = tokens_avail;
  if (clauses.exists ())
    clauses[0].type = CPP_PRAGMA;
}
/* OpenMP 4.0:
   # pragma omp declare target new-line
   declarations and definitions
   # pragma omp end declare target new-line

   OpenMP 4.5:
   # pragma omp declare target ( extended-list ) new-line

   # pragma omp declare target declare-target-clauses[seq] new-line  */

#define OMP_DECLARE_TARGET_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINK) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE_TYPE))

/* Parse the clause forms of #pragma omp declare target and mark the
   listed declarations with the appropriate "omp declare target*"
   attributes.  The clauseless form just opens an implicit declare
   target region by bumping current_omp_declare_target_attribute.  */
static void
c_parser_omp_declare_target (c_parser *parser)
{
  tree clauses = NULL_TREE;
  int device_type = 0;
  bool only_device_type = true;
  if (c_parser_next_token_is (parser, CPP_NAME))
    clauses = c_parser_omp_all_clauses (parser, OMP_DECLARE_TARGET_CLAUSE_MASK,
					"#pragma omp declare target");
  else if (c_parser_next_token_is (parser, CPP_OPEN_PAREN))
    {
      /* OpenMP 4.5 extended-list form is parsed as a "to" clause.  */
      clauses = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_TO_DECLARE,
					      clauses);
      clauses = c_finish_omp_clauses (clauses, C_ORT_OMP);
      c_parser_skip_to_pragma_eol (parser);
    }
  else
    {
      /* Clauseless form: start a declare target block.  */
      c_parser_skip_to_pragma_eol (parser);
      current_omp_declare_target_attribute++;
      return;
    }
  /* Collect all device_type kinds first; they apply to every function
     listed in the other clauses.  */
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEVICE_TYPE)
      device_type |= OMP_CLAUSE_DEVICE_TYPE_KIND (c);
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEVICE_TYPE)
	continue;
      tree t = OMP_CLAUSE_DECL (c), id;
      tree at1 = lookup_attribute ("omp declare target", DECL_ATTRIBUTES (t));
      tree at2 = lookup_attribute ("omp declare target link",
				   DECL_ATTRIBUTES (t));
      only_device_type = false;
      /* AT1 is the attribute matching this clause, AT2 the conflicting
	 one; swapped for "link" clauses.  */
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINK)
	{
	  id = get_identifier ("omp declare target link");
	  std::swap (at1, at2);
	}
      else
	id = get_identifier ("omp declare target");
      if (at2)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD specified both in declare target %<link%> and %<to%>"
		    " clauses", t);
	  continue;
	}
      if (!at1)
	{
	  DECL_ATTRIBUTES (t) = tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
	  if (TREE_CODE (t) != FUNCTION_DECL && !is_global_var (t))
	    continue;
	  /* Register the symbol for offloading.  */
	  symtab_node *node = symtab_node::get (t);
	  if (node != NULL)
	    {
	      node->offloadable = 1;
	      if (ENABLE_OFFLOADING)
		{
		  g->have_offload = true;
		  if (is_a <varpool_node *> (node))
		    vec_safe_push (offload_vars, t);
		}
	    }
	}
      if (TREE_CODE (t) != FUNCTION_DECL)
	continue;
      /* device_type(host)/(nohost) is recorded via additional attributes
	 on the function, added only once.  */
      if ((device_type & OMP_CLAUSE_DEVICE_TYPE_HOST) != 0)
	{
	  tree at3 = lookup_attribute ("omp declare target host",
				       DECL_ATTRIBUTES (t));
	  if (at3 == NULL_TREE)
	    {
	      id = get_identifier ("omp declare target host");
	      DECL_ATTRIBUTES (t)
		= tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
	    }
	}
      if ((device_type & OMP_CLAUSE_DEVICE_TYPE_NOHOST) != 0)
	{
	  tree at3 = lookup_attribute ("omp declare target nohost",
				       DECL_ATTRIBUTES (t));
	  if (at3 == NULL_TREE)
	    {
	      id = get_identifier ("omp declare target nohost");
	      DECL_ATTRIBUTES (t)
		= tree_cons (id, NULL_TREE, DECL_ATTRIBUTES (t));
	    }
	}
    }
  if (device_type && only_device_type)
    warning_at (OMP_CLAUSE_LOCATION (clauses), 0,
		"directive with only %<device_type%> clauses ignored");
}
/* Parse #pragma omp end declare target and close the innermost open
   implicit declare target region, diagnosing an unmatched directive.  */
static void
c_parser_omp_end_declare_target (c_parser *parser)
{
  location_t loc = c_parser_peek_token (parser)->location;
  c_parser_consume_pragma (parser);

  /* After "end", the identifiers "declare" and "target" must follow.  */
  bool saw_declare
    = (c_parser_next_token_is (parser, CPP_NAME)
       && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value),
		  "declare") == 0);
  if (!saw_declare)
    {
      c_parser_error (parser, "expected %<declare%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  c_parser_consume_token (parser);

  bool saw_target
    = (c_parser_next_token_is (parser, CPP_NAME)
       && strcmp (IDENTIFIER_POINTER (c_parser_peek_token (parser)->value),
		  "target") == 0);
  if (!saw_target)
    {
      c_parser_error (parser, "expected %<target%>");
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  c_parser_consume_token (parser);

  c_parser_skip_to_pragma_eol (parser);

  /* Balance against a preceding #pragma omp declare target.  */
  if (!current_omp_declare_target_attribute)
    error_at (loc, "%<#pragma omp end declare target%> without corresponding "
		   "%<#pragma omp declare target%>");
  else
    current_omp_declare_target_attribute--;
}
/* OpenMP 4.0
   #pragma omp declare reduction (reduction-id : typename-list : expression) \
      initializer-clause[opt] new-line

   initializer-clause:
      initializer (omp_priv = initializer)
      initializer (function-name (argument-list))

   Parse the whole directive.  The combiner expression (and the optional
   initializer) is parsed once per type in typename-list, each time
   inside a dummy FUNCTION_DECL context that declares the artificial
   variables omp_out/omp_in (and omp_priv/omp_orig for the initializer).
   The parsed pieces are recorded as a TREE_VEC on the DECL_INITIAL of
   the magic "omp declare reduction ..." decl.  */
static void
c_parser_omp_declare_reduction (c_parser *parser, enum pragma_context context)
{
  unsigned int tokens_avail = 0, i;
  vec<tree> types = vNULL;
  /* Saved copy of the combiner/initializer tokens, replayed per type
     when the typename-list has more than one entry.  */
  vec<c_token> clauses = vNULL;
  enum tree_code reduc_code = ERROR_MARK;
  tree reduc_id = NULL_TREE;
  tree type;
  location_t rloc = c_parser_peek_token (parser)->location;
  if (context == pragma_struct || context == pragma_param)
    {
      error ("%<#pragma omp declare reduction%> not at file or block scope");
      goto fail;
    }
  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
    goto fail;
  /* reduction-id: a built-in operator, min/max, or an identifier naming
     a user-defined reduction.  */
  switch (c_parser_peek_token (parser)->type)
    {
    case CPP_PLUS:
      reduc_code = PLUS_EXPR;
      break;
    case CPP_MULT:
      reduc_code = MULT_EXPR;
      break;
    case CPP_MINUS:
      reduc_code = MINUS_EXPR;
      break;
    case CPP_AND:
      reduc_code = BIT_AND_EXPR;
      break;
    case CPP_XOR:
      reduc_code = BIT_XOR_EXPR;
      break;
    case CPP_OR:
      reduc_code = BIT_IOR_EXPR;
      break;
    case CPP_AND_AND:
      reduc_code = TRUTH_ANDIF_EXPR;
      break;
    case CPP_OR_OR:
      reduc_code = TRUTH_ORIF_EXPR;
      break;
    case CPP_NAME:
      const char *p;
      p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "min") == 0)
	{
	  reduc_code = MIN_EXPR;
	  break;
	}
      if (strcmp (p, "max") == 0)
	{
	  reduc_code = MAX_EXPR;
	  break;
	}
      reduc_id = c_parser_peek_token (parser)->value;
      break;
    default:
      c_parser_error (parser,
		      "expected %<+%>, %<*%>, %<-%>, %<&%>, "
		      "%<^%>, %<|%>, %<&&%>, %<||%> or identifier");
      goto fail;
    }
  tree orig_reduc_id, reduc_decl;
  orig_reduc_id = reduc_id;
  reduc_id = c_omp_reduction_id (reduc_code, reduc_id);
  reduc_decl = c_omp_reduction_decl (reduc_id);
  c_parser_consume_token (parser);
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>"))
    goto fail;
  /* Parse typename-list, rejecting disallowed types and duplicate
     declarations for the same reduction-id.  */
  while (true)
    {
      location_t loc = c_parser_peek_token (parser)->location;
      struct c_type_name *ctype = c_parser_type_name (parser);
      if (ctype != NULL)
	{
	  type = groktypename (ctype, NULL, NULL);
	  if (type == error_mark_node)
	    ;
	  else if ((INTEGRAL_TYPE_P (type)
		    || TREE_CODE (type) == REAL_TYPE
		    || TREE_CODE (type) == COMPLEX_TYPE)
		   && orig_reduc_id == NULL_TREE)
	    /* Built-in operators already work on arithmetic types.  */
	    error_at (loc, "predeclared arithmetic type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TREE_CODE (type) == FUNCTION_TYPE
		   || TREE_CODE (type) == ARRAY_TYPE)
	    error_at (loc, "function or array type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TYPE_ATOMIC (type))
	    error_at (loc, "%<_Atomic%> qualified type in "
			   "%<#pragma omp declare reduction%>");
	  else if (TYPE_QUALS_NO_ADDR_SPACE (type))
	    error_at (loc, "const, volatile or restrict qualified type in "
			   "%<#pragma omp declare reduction%>");
	  else
	    {
	      /* Diagnose a second declaration for a compatible type.  */
	      tree t;
	      for (t = DECL_INITIAL (reduc_decl); t; t = TREE_CHAIN (t))
		if (comptypes (TREE_PURPOSE (t), type))
		  {
		    error_at (loc, "redeclaration of %qs "
				   "%<#pragma omp declare reduction%> for "
				   "type %qT",
			      IDENTIFIER_POINTER (reduc_id)
			      + sizeof ("omp declare reduction ") - 1,
			      type);
		    location_t ploc
		      = DECL_SOURCE_LOCATION (TREE_VEC_ELT (TREE_VALUE (t),
							    0));
		    error_at (ploc, "previous %<#pragma omp declare "
				    "reduction%>");
		    break;
		  }
	      if (t == NULL_TREE)
		types.safe_push (type);
	    }
	  if (c_parser_next_token_is (parser, CPP_COMMA))
	    c_parser_consume_token (parser);
	  else
	    break;
	}
      else
	break;
    }
  if (!c_parser_require (parser, CPP_COLON, "expected %<:%>")
      || types.is_empty ())
    {
     fail:
      /* Error exit: free everything and drop the rest of the pragma.  */
      clauses.release ();
      types.release ();
      while (true)
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL)
	    break;
	  c_parser_consume_token (parser);
	}
      c_parser_skip_to_pragma_eol (parser);
      return;
    }
  if (types.length () > 1)
    {
      /* Multiple types: stash the remaining tokens so the combiner and
	 initializer can be re-parsed once per type.  */
      while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
	{
	  c_token *token = c_parser_peek_token (parser);
	  if (token->type == CPP_EOF)
	    goto fail;
	  clauses.safe_push (*token);
	  c_parser_consume_token (parser);
	}
      clauses.safe_push (*c_parser_peek_token (parser));
      c_parser_skip_to_pragma_eol (parser);
      /* Make sure nothing tries to read past the end of the tokens.  */
      c_token eof_token;
      memset (&eof_token, 0, sizeof (eof_token));
      eof_token.type = CPP_EOF;
      clauses.safe_push (eof_token);
      clauses.safe_push (eof_token);
    }
  int errs = errorcount;
  FOR_EACH_VEC_ELT (types, i, type)
    {
      /* Redirect the parser at the saved tokens (no-op for the
	 single-type case, which parses the live token stream).  */
      tokens_avail = parser->tokens_avail;
      gcc_assert (parser->tokens == &parser->tokens_buf[0]);
      if (!clauses.is_empty ())
	{
	  parser->tokens = clauses.address ();
	  parser->tokens_avail = clauses.length ();
	  parser->in_pragma = true;
	}
      /* Build a dummy function context to host the artificial
	 variables the combiner/initializer may reference.  */
      bool nested = current_function_decl != NULL_TREE;
      if (nested)
	c_push_function_context ();
      tree fndecl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
				reduc_id, default_function_type);
      current_function_decl = fndecl;
      allocate_struct_function (fndecl, true);
      push_scope ();
      tree stmt = push_stmt_list ();
      /* Intentionally BUILTINS_LOCATION, so that -Wshadow doesn't
	 warn about these.  */
      tree omp_out = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_out"), type);
      DECL_ARTIFICIAL (omp_out) = 1;
      DECL_CONTEXT (omp_out) = fndecl;
      pushdecl (omp_out);
      tree omp_in = build_decl (BUILTINS_LOCATION, VAR_DECL,
				get_identifier ("omp_in"), type);
      DECL_ARTIFICIAL (omp_in) = 1;
      DECL_CONTEXT (omp_in) = fndecl;
      pushdecl (omp_in);
      struct c_expr combiner = c_parser_expression (parser);
      struct c_expr initializer;
      tree omp_priv = NULL_TREE, omp_orig = NULL_TREE;
      bool bad = false;
      initializer.set_error ();
      if (!c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	bad = true;
      else if (c_parser_next_token_is (parser, CPP_NAME)
	       && strcmp (IDENTIFIER_POINTER
				(c_parser_peek_token (parser)->value),
			  "initializer") == 0)
	{
	  /* Optional initializer-clause; reopen the scope so omp_out and
	     omp_in are not visible inside it.  */
	  c_parser_consume_token (parser);
	  pop_scope ();
	  push_scope ();
	  omp_priv = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_priv"), type);
	  DECL_ARTIFICIAL (omp_priv) = 1;
	  DECL_INITIAL (omp_priv) = error_mark_node;
	  DECL_CONTEXT (omp_priv) = fndecl;
	  pushdecl (omp_priv);
	  omp_orig = build_decl (BUILTINS_LOCATION, VAR_DECL,
				 get_identifier ("omp_orig"), type);
	  DECL_ARTIFICIAL (omp_orig) = 1;
	  DECL_CONTEXT (omp_orig) = fndecl;
	  pushdecl (omp_orig);
	  if (!c_parser_require (parser, CPP_OPEN_PAREN, "expected %<(%>"))
	    bad = true;
	  else if (!c_parser_next_token_is (parser, CPP_NAME))
	    {
	      c_parser_error (parser, "expected %<omp_priv%> or "
				      "function-name");
	      bad = true;
	    }
	  else if (strcmp (IDENTIFIER_POINTER
				(c_parser_peek_token (parser)->value),
			   "omp_priv") != 0)
	    {
	      /* Function-call form: some argument must pass &omp_priv.  */
	      if (c_parser_peek_2nd_token (parser)->type != CPP_OPEN_PAREN
		  || c_parser_peek_token (parser)->id_kind != C_ID_ID)
		{
		  c_parser_error (parser, "expected function-name %<(%>");
		  bad = true;
		}
	      else
		initializer = c_parser_postfix_expression (parser);
	      if (initializer.value
		  && TREE_CODE (initializer.value) == CALL_EXPR)
		{
		  int j;
		  tree c = initializer.value;
		  for (j = 0; j < call_expr_nargs (c); j++)
		    {
		      tree a = CALL_EXPR_ARG (c, j);
		      STRIP_NOPS (a);
		      if (TREE_CODE (a) == ADDR_EXPR
			  && TREE_OPERAND (a, 0) == omp_priv)
			break;
		    }
		  if (j == call_expr_nargs (c))
		    error ("one of the initializer call arguments should be "
			   "%<&omp_priv%>");
		}
	    }
	  else
	    {
	      /* omp_priv = initializer form.  */
	      c_parser_consume_token (parser);
	      if (!c_parser_require (parser, CPP_EQ, "expected %<=%>"))
		bad = true;
	      else
		{
		  tree st = push_stmt_list ();
		  location_t loc = c_parser_peek_token (parser)->location;
		  rich_location richloc (line_table, loc);
		  start_init (omp_priv, NULL_TREE, 0, &richloc);
		  struct c_expr init = c_parser_initializer (parser);
		  finish_init ();
		  finish_decl (omp_priv, loc, init.value,
			       init.original_type, NULL_TREE);
		  pop_stmt_list (st);
		}
	    }
	  if (!bad
	      && !c_parser_require (parser, CPP_CLOSE_PAREN, "expected %<)%>"))
	    bad = true;
	}
      if (!bad)
	{
	  c_parser_skip_to_pragma_eol (parser);
	  /* Record: 0 = omp_out, 1 = omp_in, 2 = combiner, and with an
	     initializer also 3 = omp_priv, 4 = omp_orig, 5 = init expr.  */
	  tree t = tree_cons (type, make_tree_vec (omp_priv ? 6 : 3),
			      DECL_INITIAL (reduc_decl));
	  DECL_INITIAL (reduc_decl) = t;
	  DECL_SOURCE_LOCATION (omp_out) = rloc;
	  TREE_VEC_ELT (TREE_VALUE (t), 0) = omp_out;
	  TREE_VEC_ELT (TREE_VALUE (t), 1) = omp_in;
	  TREE_VEC_ELT (TREE_VALUE (t), 2) = combiner.value;
	  walk_tree (&combiner.value, c_check_omp_declare_reduction_r,
		     &TREE_VEC_ELT (TREE_VALUE (t), 0), NULL);
	  if (omp_priv)
	    {
	      DECL_SOURCE_LOCATION (omp_priv) = rloc;
	      TREE_VEC_ELT (TREE_VALUE (t), 3) = omp_priv;
	      TREE_VEC_ELT (TREE_VALUE (t), 4) = omp_orig;
	      TREE_VEC_ELT (TREE_VALUE (t), 5) = initializer.value;
	      walk_tree (&initializer.value, c_check_omp_declare_reduction_r,
			 &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL);
	      walk_tree (&DECL_INITIAL (omp_priv),
			 c_check_omp_declare_reduction_r,
			 &TREE_VEC_ELT (TREE_VALUE (t), 3), NULL);
	    }
	}
      /* Tear down the dummy function context.  */
      pop_stmt_list (stmt);
      pop_scope ();
      if (cfun->language != NULL)
	{
	  ggc_free (cfun->language);
	  cfun->language = NULL;
	}
      set_cfun (NULL);
      current_function_decl = NULL_TREE;
      if (nested)
	c_pop_function_context ();
      if (!clauses.is_empty ())
	{
	  parser->tokens = &parser->tokens_buf[0];
	  parser->tokens_avail = tokens_avail;
	}
      if (bad)
	goto fail;
      /* Stop re-parsing once an error was reported for some type.  */
      if (errs != errorcount)
	break;
    }
  clauses.release ();
  types.release ();
}
/* OpenMP 4.0
   #pragma omp declare simd declare-simd-clauses[optseq] new-line
   #pragma omp declare reduction (reduction-id : typename-list : expression) \
      initializer-clause[opt] new-line
   #pragma omp declare target new-line

   OpenMP 5.0
   #pragma omp declare variant (identifier) match (context-selector)

   Dispatch to the handler for whichever form of "declare" follows.  */
static void
c_parser_omp_declare (c_parser *parser, enum pragma_context context)
{
  c_parser_consume_pragma (parser);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *directive
	= IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (directive, "simd") == 0)
	/* The "simd" token is consumed inside c_parser_omp_declare_simd.  */
	c_parser_omp_declare_simd (parser, context);
      else if (strcmp (directive, "reduction") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_reduction (parser, context);
	}
      else if (!flag_openmp) /* flag_openmp_simd */
	/* Under -fopenmp-simd only "simd" and "reduction" matter; drop
	   any other declare directive silently.  */
	c_parser_skip_to_pragma_eol (parser, false);
      else if (strcmp (directive, "target") == 0)
	{
	  c_parser_consume_token (parser);
	  c_parser_omp_declare_target (parser);
	}
      else if (strcmp (directive, "variant") == 0)
	/* The "variant" token is consumed inside
	   c_parser_omp_declare_simd.  */
	c_parser_omp_declare_simd (parser, context);
      else
	goto unknown;
      return;
    }
 unknown:
  c_parser_error (parser, "expected %<simd%>, %<reduction%>, "
			  "%<target%> or %<variant%>");
  c_parser_skip_to_pragma_eol (parser);
}
/* OpenMP 5.0
   #pragma omp requires clauses[optseq] new-line

   Parse the requires directive, diagnose repeated or conflicting
   requirements, and accumulate the accepted ones into the global
   omp_requires_mask.  */
static void
c_parser_omp_requires (c_parser *parser)
{
  bool first = true;
  enum omp_requires new_req = (enum omp_requires) 0;
  c_parser_consume_pragma (parser);
  location_t loc = c_parser_peek_token (parser)->location;
  while (c_parser_next_token_is_not (parser, CPP_PRAGMA_EOL))
    {
      /* Clauses may be separated by optional commas.  */
      if (!first && c_parser_next_token_is (parser, CPP_COMMA))
	c_parser_consume_token (parser);
      first = false;
      if (c_parser_next_token_is (parser, CPP_NAME))
	{
	  const char *p
	    = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
	  location_t cloc = c_parser_peek_token (parser)->location;
	  enum omp_requires this_req = (enum omp_requires) 0;
	  if (!strcmp (p, "unified_address"))
	    this_req = OMP_REQUIRES_UNIFIED_ADDRESS;
	  else if (!strcmp (p, "unified_shared_memory"))
	    this_req = OMP_REQUIRES_UNIFIED_SHARED_MEMORY;
	  else if (!strcmp (p, "dynamic_allocators"))
	    this_req = OMP_REQUIRES_DYNAMIC_ALLOCATORS;
	  else if (!strcmp (p, "reverse_offload"))
	    this_req = OMP_REQUIRES_REVERSE_OFFLOAD;
	  else if (!strcmp (p, "atomic_default_mem_order"))
	    {
	      c_parser_consume_token (parser);
	      matching_parens parens;
	      if (parens.require_open (parser))
		{
		  /* Argument is one of three memory orders, stored
		     directly in the requires mask.  */
		  if (c_parser_next_token_is (parser, CPP_NAME))
		    {
		      tree v = c_parser_peek_token (parser)->value;
		      p = IDENTIFIER_POINTER (v);
		      if (!strcmp (p, "seq_cst"))
			this_req
			  = (enum omp_requires) OMP_MEMORY_ORDER_SEQ_CST;
		      else if (!strcmp (p, "relaxed"))
			this_req
			  = (enum omp_requires) OMP_MEMORY_ORDER_RELAXED;
		      else if (!strcmp (p, "acq_rel"))
			this_req
			  = (enum omp_requires) OMP_MEMORY_ORDER_ACQ_REL;
		    }
		  if (this_req == 0)
		    {
		      error_at (c_parser_peek_token (parser)->location,
				"expected %<seq_cst%>, %<relaxed%> or "
				"%<acq_rel%>");
		      /* Consume the bad token when it looks like it was
			 meant to be the argument, so the close paren is
			 still found.  */
		      if (c_parser_peek_2nd_token (parser)->type
			  == CPP_CLOSE_PAREN)
			c_parser_consume_token (parser);
		    }
		  else
		    c_parser_consume_token (parser);
		  parens.skip_until_found_close (parser);
		  if (this_req == 0)
		    {
		      c_parser_skip_to_pragma_eol (parser, false);
		      return;
		    }
		}
	      /* Clause name token was consumed above.  */
	      p = NULL;
	    }
	  else
	    {
	      error_at (cloc, "expected %<unified_address%>, "
			      "%<unified_shared_memory%>, "
			      "%<dynamic_allocators%>, "
			      "%<reverse_offload%> "
			      "or %<atomic_default_mem_order%> clause");
	      c_parser_skip_to_pragma_eol (parser, false);
	      return;
	    }
	  if (p)
	    /* Clause recognized but its requirement is unimplemented.  */
	    sorry_at (cloc, "%qs clause on %<requires%> directive not "
			    "supported yet", p);
	  if (p)
	    c_parser_consume_token (parser);
	  if (this_req)
	    {
	      if ((this_req & ~OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER) != 0)
		{
		  if ((this_req & new_req) != 0)
		    error_at (cloc, "too many %qs clauses", p);
		  if (this_req != OMP_REQUIRES_DYNAMIC_ALLOCATORS
		      && (omp_requires_mask & OMP_REQUIRES_TARGET_USED) != 0)
		    error_at (cloc, "%qs clause used lexically after first "
				    "target construct or offloading API", p);
		}
	      else if ((new_req & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER) != 0)
		{
		  error_at (cloc, "too many %qs clauses",
			    "atomic_default_mem_order");
		  this_req = (enum omp_requires) 0;
		}
	      else if ((omp_requires_mask
			& OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER) != 0)
		{
		  error_at (cloc, "more than one %<atomic_default_mem_order%>"
				  " clause in a single compilation unit");
		  /* Keep the previously recorded memory order.  */
		  this_req
		    = (enum omp_requires)
		      (omp_requires_mask
		       & OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER);
		}
	      else if ((omp_requires_mask
			& OMP_REQUIRES_ATOMIC_DEFAULT_MEM_ORDER_USED) != 0)
		error_at (cloc, "%<atomic_default_mem_order%> clause used "
				"lexically after first %<atomic%> construct "
				"without memory order clause");
	      new_req = (enum omp_requires) (new_req | this_req);
	      omp_requires_mask
		= (enum omp_requires) (omp_requires_mask | this_req);
	      continue;
	    }
	}
      break;
    }
  c_parser_skip_to_pragma_eol (parser);
  if (new_req == 0)
    error_at (loc, "%<pragma omp requires%> requires at least one clause");
}
/* Helper function for c_parser_omp_taskloop.
   Disallow zero sized or potentially zero sized task reductions.
   Offending reduction clauses are diagnosed and unlinked; the
   (possibly shortened) clause chain is returned.  */
static tree
c_finish_taskloop_clauses (tree clauses)
{
  tree *link = &clauses;
  while (*link)
    {
      tree c = *link;
      bool drop = false;
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	{
	  tree type = strip_array_types (TREE_TYPE (OMP_CLAUSE_DECL (c)));
	  if (integer_zerop (TYPE_SIZE_UNIT (type)))
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"zero sized type %qT in %<reduction%> clause", type);
	      drop = true;
	    }
	  else if (TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"variable sized type %qT in %<reduction%> clause",
			type);
	      drop = true;
	    }
	}
      if (drop)
	/* Splice the bad clause out of the chain.  */
	*link = OMP_CLAUSE_CHAIN (c);
      else
	link = &OMP_CLAUSE_CHAIN (c);
    }
  return clauses;
}
/* OpenMP 4.5:
   #pragma omp taskloop taskloop-clause[optseq] new-line
     for-loop

   #pragma omp taskloop simd taskloop-simd-clause[optseq] new-line
     for-loop  */

#define OMP_TASKLOOP_CLAUSE_MASK \
( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_GRAINSIZE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TASKS) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIORITY) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \
| (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION))

/* Parse a taskloop (or combined taskloop simd) construct.  P_NAME is
   the directive name so far (extended in place), MASK the clause mask
   accumulated from enclosing combined constructs, CCLAUSES the split
   clause buffers of the enclosing combined construct or NULL, and IF_P
   tells the caller whether the body could confuse a dangling else.
   Returns the generated statement or NULL.  */
static tree
c_parser_omp_taskloop (location_t loc, c_parser *parser,
		       char *p_name, omp_clause_mask mask, tree *cclauses,
		       bool *if_p)
{
  tree clauses, block, ret;
  strcat (p_name, " taskloop");
  mask |= OMP_TASKLOOP_CLAUSE_MASK;
  /* #pragma omp parallel master taskloop{, simd} disallow in_reduction
     clause.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IN_REDUCTION);
  if (c_parser_next_token_is (parser, CPP_NAME))
    {
      const char *p = IDENTIFIER_POINTER (c_parser_peek_token (parser)->value);
      if (strcmp (p, "simd") == 0)
	{
	  /* Combined taskloop simd: let c_parser_omp_simd parse the loop
	     and wrap its result in an OMP_TASKLOOP taking the taskloop
	     half of the split clauses.  */
	  tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT];
	  if (cclauses == NULL)
	    cclauses = cclauses_buf;
	  c_parser_consume_token (parser);
	  if (!flag_openmp) /* flag_openmp_simd */
	    return c_parser_omp_simd (loc, parser, p_name, mask, cclauses,
				      if_p);
	  block = c_begin_compound_stmt (true);
	  ret = c_parser_omp_simd (loc, parser, p_name, mask, cclauses, if_p);
	  block = c_end_compound_stmt (loc, block, true);
	  if (ret == NULL)
	    return ret;
	  ret = make_node (OMP_TASKLOOP);
	  TREE_TYPE (ret) = void_type_node;
	  OMP_FOR_BODY (ret) = block;
	  OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
	  OMP_FOR_CLAUSES (ret)
	    = c_finish_taskloop_clauses (OMP_FOR_CLAUSES (ret));
	  SET_EXPR_LOCATION (ret, loc);
	  add_stmt (ret);
	  return ret;
	}
    }
  if (!flag_openmp) /* flag_openmp_simd */
    {
      /* Plain taskloop is irrelevant for -fopenmp-simd.  */
      c_parser_skip_to_pragma_eol (parser, false);
      return NULL_TREE;
    }
  clauses = c_parser_omp_all_clauses (parser, mask, p_name, cclauses == NULL);
  if (cclauses)
    {
      /* Part of a combined construct: keep only the taskloop share.  */
      omp_split_clauses (loc, OMP_TASKLOOP, mask, clauses, cclauses);
      clauses = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
    }
  clauses = c_finish_taskloop_clauses (clauses);
  block = c_begin_compound_stmt (true);
  ret = c_parser_omp_for_loop (loc, parser, OMP_TASKLOOP, clauses, NULL, if_p);
  block = c_end_compound_stmt (loc, block, true);
  add_stmt (block);
  return ret;
}
/* Main entry point to parsing most OpenMP pragmas. */
static void
c_parser_omp_construct (c_parser *parser, bool *if_p)
{
enum pragma_kind p_kind;
location_t loc;
tree stmt;
char p_name[sizeof "#pragma omp teams distribute parallel for simd"];
omp_clause_mask mask (0);
loc = c_parser_peek_token (parser)->location;
p_kind = c_parser_peek_token (parser)->pragma_kind;
c_parser_consume_pragma (parser);
switch (p_kind)
{
case PRAGMA_OACC_ATOMIC:
c_parser_omp_atomic (loc, parser);
return;
case PRAGMA_OACC_CACHE:
strcpy (p_name, "#pragma acc");
stmt = c_parser_oacc_cache (loc, parser);
break;
case PRAGMA_OACC_DATA:
stmt = c_parser_oacc_data (loc, parser, if_p);
break;
case PRAGMA_OACC_HOST_DATA:
stmt = c_parser_oacc_host_data (loc, parser, if_p);
break;
case PRAGMA_OACC_KERNELS:
case PRAGMA_OACC_PARALLEL:
case PRAGMA_OACC_SERIAL:
strcpy (p_name, "#pragma acc");
stmt = c_parser_oacc_compute (loc, parser, p_kind, p_name, if_p);
break;
case PRAGMA_OACC_LOOP:
strcpy (p_name, "#pragma acc");
stmt = c_parser_oacc_loop (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OACC_WAIT:
strcpy (p_name, "#pragma wait");
stmt = c_parser_oacc_wait (loc, parser, p_name);
break;
case PRAGMA_OMP_ATOMIC:
c_parser_omp_atomic (loc, parser);
return;
case PRAGMA_OMP_CRITICAL:
stmt = c_parser_omp_critical (loc, parser, if_p);
break;
case PRAGMA_OMP_DISTRIBUTE:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_distribute (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_FOR:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_for (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_LOOP:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_loop (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_MASTER:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_master (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_PARALLEL:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_parallel (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_SECTIONS:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_sections (loc, parser, p_name, mask, NULL);
break;
case PRAGMA_OMP_SIMD:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_simd (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_SINGLE:
stmt = c_parser_omp_single (loc, parser, if_p);
break;
case PRAGMA_OMP_TASK:
stmt = c_parser_omp_task (loc, parser, if_p);
break;
case PRAGMA_OMP_TASKGROUP:
stmt = c_parser_omp_taskgroup (loc, parser, if_p);
break;
case PRAGMA_OMP_TASKLOOP:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_taskloop (loc, parser, p_name, mask, NULL, if_p);
break;
case PRAGMA_OMP_TEAMS:
strcpy (p_name, "#pragma omp");
stmt = c_parser_omp_teams (loc, parser, p_name, mask, NULL, if_p);
break;
default:
gcc_unreachable ();
}
if (stmt && stmt != error_mark_node)
gcc_assert (EXPR_LOCATION (stmt) != UNKNOWN_LOCATION);
}
/* OpenMP 2.5:
# pragma omp threadprivate (variable-list) */
/* OpenMP 2.5:
   # pragma omp threadprivate (variable-list)
   Parse the variable list and mark each named global/static variable
   for thread-local storage, diagnosing invalid candidates.  */
static void
c_parser_omp_threadprivate (c_parser *parser)
{
tree vars, t;
location_t loc;
c_parser_consume_pragma (parser);
loc = c_parser_peek_token (parser)->location;
vars = c_parser_omp_var_list_parens (parser, OMP_CLAUSE_ERROR, NULL);
/* Mark every variable in VARS to be assigned thread local storage.  */
for (t = vars; t; t = TREE_CHAIN (t))
{
tree v = TREE_PURPOSE (t);
/* FIXME diagnostics: Ideally we should keep individual
locations for all the variables in the var list to make the
following errors more precise.  Perhaps
c_parser_omp_var_list_parens() should construct a list of
locations to go along with the var list.  */
/* If V had already been marked threadprivate, it doesn't matter
whether it had been used prior to this point.  */
if (!VAR_P (v))
error_at (loc, "%qD is not a variable", v);
else if (TREE_USED (v) && !C_DECL_THREADPRIVATE_P (v))
error_at (loc, "%qE declared %<threadprivate%> after first use", v);
else if (! is_global_var (v))
error_at (loc, "automatic variable %qE cannot be %<threadprivate%>", v);
else if (TREE_TYPE (v) == error_mark_node)
;
else if (! COMPLETE_TYPE_P (TREE_TYPE (v)))
error_at (loc, "%<threadprivate%> %qE has incomplete type", v);
else
{
if (! DECL_THREAD_LOCAL_P (v))
{
set_decl_tls_model (v, decl_default_tls_model (v));
/* If rtl has been already set for this var, call
make_decl_rtl once again, so that encode_section_info
has a chance to look at the new decl flags.  */
if (DECL_RTL_SET_P (v))
make_decl_rtl (v);
}
C_DECL_THREADPRIVATE_P (v) = 1;
}
}
c_parser_skip_to_pragma_eol (parser);
}
/* Parse a transaction attribute (GCC Extension).
transaction-attribute:
gnu-attributes
attribute-specifier
*/
/* Parse a transaction attribute (GCC Extension): either a GNU-style
   __attribute__ list or a standard [[...]] attribute specifier.
   Returns NULL_TREE when neither form is present.  */
static tree
c_parser_transaction_attributes (c_parser *parser)
{
  /* GNU __attribute__ takes precedence when present.  */
  if (c_parser_next_token_is_keyword (parser, RID_ATTRIBUTE))
    return c_parser_gnu_attributes (parser);

  /* Otherwise accept a standard attribute specifier, if one starts here.  */
  return (c_parser_next_token_is (parser, CPP_OPEN_SQUARE)
	  ? c_parser_std_attribute_specifier (parser, true)
	  : NULL_TREE);
}
/* Parse a __transaction_atomic or __transaction_relaxed statement
(GCC Extension).
transaction-statement:
__transaction_atomic transaction-attribute[opt] compound-statement
__transaction_relaxed compound-statement
Note that the only valid attribute is: "outer".
*/
/* Parse the statement form; KEYWORD is RID_TRANSACTION_ATOMIC or
   RID_TRANSACTION_RELAXED.  Tracks nesting via parser->in_transaction
   so __transaction_cancel can validate its context.  */
static tree
c_parser_transaction (c_parser *parser, enum rid keyword)
{
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1, new_in;
  location_t loc = c_parser_peek_token (parser)->location;
  tree stmt, attrs;
  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);
  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Only __transaction_atomic accepts attributes ("outer").  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER);
    }
  /* Keep track if we're in the lexical scope of an outer transaction.  */
  new_in = this_in | (old_in & TM_STMT_ATTR_OUTER);
  parser->in_transaction = new_in;
  stmt = c_parser_compound_statement (parser);
  parser->in_transaction = old_in;
  if (flag_tm)
    stmt = c_finish_transaction (loc, stmt, this_in);
  else
    /* Fixed diagnostic: no stray space inside the %<...%> quoting of
       __transaction_relaxed.  */
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed%> "
	"without transactional memory support enabled"));
  return stmt;
}
/* Parse a __transaction_atomic or __transaction_relaxed expression
(GCC Extension).
transaction-expression:
__transaction_atomic ( expression )
__transaction_relaxed ( expression )
*/
/* Parse the expression form; KEYWORD is RID_TRANSACTION_ATOMIC or
   RID_TRANSACTION_RELAXED.  Returns a c_expr wrapping a
   TRANSACTION_EXPR, or an error expression on parse failure.  */
static struct c_expr
c_parser_transaction_expression (c_parser *parser, enum rid keyword)
{
  struct c_expr ret;
  unsigned int old_in = parser->in_transaction;
  unsigned int this_in = 1;
  location_t loc = c_parser_peek_token (parser)->location;
  tree attrs;
  gcc_assert ((keyword == RID_TRANSACTION_ATOMIC
	       || keyword == RID_TRANSACTION_RELAXED)
	      && c_parser_next_token_is_keyword (parser, keyword));
  c_parser_consume_token (parser);
  if (keyword == RID_TRANSACTION_RELAXED)
    this_in |= TM_STMT_ATTR_RELAXED;
  else
    {
      /* Attributes allowed, but "outer" is not valid on the
	 expression form (mask 0).  */
      attrs = c_parser_transaction_attributes (parser);
      if (attrs)
	this_in |= parse_tm_stmt_attr (attrs, 0);
    }
  parser->in_transaction = this_in;
  matching_parens parens;
  if (parens.require_open (parser))
    {
      tree expr = c_parser_expression (parser).value;
      ret.original_type = TREE_TYPE (expr);
      ret.value = build1 (TRANSACTION_EXPR, ret.original_type, expr);
      if (this_in & TM_STMT_ATTR_RELAXED)
	TRANSACTION_EXPR_RELAXED (ret.value) = 1;
      SET_EXPR_LOCATION (ret.value, loc);
      ret.original_code = TRANSACTION_EXPR;
      if (!parens.require_close (parser))
	{
	  c_parser_skip_until_found (parser, CPP_CLOSE_PAREN, NULL);
	  goto error;
	}
    }
  else
    {
     error:
      ret.set_error ();
      ret.original_code = ERROR_MARK;
      ret.original_type = NULL;
    }
  parser->in_transaction = old_in;
  if (!flag_tm)
    /* Fixed diagnostic: no stray space inside the %<...%> quoting of
       __transaction_relaxed.  */
    error_at (loc, (keyword == RID_TRANSACTION_ATOMIC ?
	"%<__transaction_atomic%> without transactional memory support enabled"
	: "%<__transaction_relaxed%> "
	"without transactional memory support enabled"));
  set_c_expr_source_range (&ret, loc, loc);
  return ret;
}
/* Parse a __transaction_cancel statement (GCC Extension).
transaction-cancel-statement:
__transaction_cancel transaction-attribute[opt] ;
Note that the only valid attribute is "outer".
*/
/* Parse and validate __transaction_cancel: it must appear inside a
   transactional context (or, with "outer", inside an outer atomic
   transaction or a may-cancel-outer function), and requires -fgnu-tm.
   Returns the abort call statement, or a NOP_EXPR on error.  */
static tree
c_parser_transaction_cancel (c_parser *parser)
{
location_t loc = c_parser_peek_token (parser)->location;
tree attrs;
bool is_outer = false;
gcc_assert (c_parser_next_token_is_keyword (parser, RID_TRANSACTION_CANCEL));
c_parser_consume_token (parser);
attrs = c_parser_transaction_attributes (parser);
if (attrs)
is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0);
if (!flag_tm)
{
error_at (loc, "%<__transaction_cancel%> without "
"transactional memory support enabled");
goto ret_error;
}
else if (parser->in_transaction & TM_STMT_ATTR_RELAXED)
{
error_at (loc, "%<__transaction_cancel%> within a "
"%<__transaction_relaxed%>");
goto ret_error;
}
else if (is_outer)
{
if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0
&& !is_tm_may_cancel_outer (current_function_decl))
{
error_at (loc, "outer %<__transaction_cancel%> not "
"within outer %<__transaction_atomic%> or "
"a %<transaction_may_cancel_outer%> function");
goto ret_error;
}
}
else if (parser->in_transaction == 0)
{
error_at (loc, "%<__transaction_cancel%> not within "
"%<__transaction_atomic%>");
goto ret_error;
}
return add_stmt (build_tm_abort_call (loc, is_outer));
ret_error:
/* Error recovery: produce a harmless statement.  */
return build1 (NOP_EXPR, void_type_node, error_mark_node);
}
/* Parse a single source file. */
/* Parse a single source file.  The two-phase parser setup handles the
   PCH case: loading a PCH triggers garbage collection, so the first
   tokens are handled with a stack-allocated (GC-invisible) parser
   before the real GC-allocated parser is created.  */
void
c_parse_file (void)
{
/* Use local storage to begin.  If the first token is a pragma, parse it.
If it is #pragma GCC pch_preprocess, then this will load a PCH file
which will cause garbage collection.  */
c_parser tparser;
memset (&tparser, 0, sizeof tparser);
tparser.translate_strings_p = true;
tparser.tokens = &tparser.tokens_buf[0];
the_parser = &tparser;
if (c_parser_peek_token (&tparser)->pragma_kind == PRAGMA_GCC_PCH_PREPROCESS)
c_parser_pragma_pch_preprocess (&tparser);
else
c_common_no_more_pch ();
/* Switch to a GC-allocated parser, copying over any state (including
buffered tokens) accumulated so far.  */
the_parser = ggc_alloc<c_parser> ();
*the_parser = tparser;
/* If the token pointer still refers to the local buffer, repoint it
at the heap copy's buffer.  */
if (tparser.tokens == &tparser.tokens_buf[0])
the_parser->tokens = &the_parser->tokens_buf[0];
/* Initialize EH, if we've been told to do so.  */
if (flag_exceptions)
using_eh_for_cleanups ();
c_parser_translation_unit (the_parser);
the_parser = NULL;
}
/* Parse the body of a function declaration marked with "__RTL".
The RTL parser works on the level of characters read from a
FILE *, whereas c_parser works at the level of tokens.
Square this circle by consuming all of the tokens up to and
including the closing brace, recording the start/end of the RTL
fragment, and reopening the file and re-reading the relevant
lines within the RTL parser.
This requires the opening and closing braces of the C function
to be on separate lines from the RTL they wrap.
Take ownership of START_WITH_PASS, if non-NULL. */
/* Consume the brace-wrapped __RTL function body as raw tokens (tracking
   nested braces), then hand the recorded source range to the RTL
   frontend parser and run the RTL passes.  Returns the location just
   past the body (or of the failure point).  Takes ownership of
   START_WITH_PASS and frees it on every early-exit path.  */
location_t
c_parser_parse_rtl_body (c_parser *parser, char *start_with_pass)
{
if (!c_parser_require (parser, CPP_OPEN_BRACE, "expected %<{%>"))
{
free (start_with_pass);
return c_parser_peek_token (parser)->location;
}
location_t start_loc = c_parser_peek_token (parser)->location;
/* Consume all tokens, up to the closing brace, handling
matching pairs of braces in the rtl dump.  */
int num_open_braces = 1;
while (1)
{
switch (c_parser_peek_token (parser)->type)
{
case CPP_OPEN_BRACE:
num_open_braces++;
break;
case CPP_CLOSE_BRACE:
if (--num_open_braces == 0)
goto found_closing_brace;
break;
case CPP_EOF:
error_at (start_loc, "no closing brace");
free (start_with_pass);
return c_parser_peek_token (parser)->location;
default:
break;
}
c_parser_consume_token (parser);
}
found_closing_brace:
/* At the closing brace; record its location.  */
location_t end_loc = c_parser_peek_token (parser)->location;
/* Consume the closing brace.  */
c_parser_consume_token (parser);
/* Invoke the RTL parser.  */
if (!read_rtl_function_body_from_file_range (start_loc, end_loc))
{
free (start_with_pass);
return end_loc;
}
/* Run the backend on the cfun created above, transferring ownership of
START_WITH_PASS.  */
run_rtl_passes (start_with_pass);
return end_loc;
}
#include "gt-c-c-parser.h"
|
depend-1.c | #include <stdlib.h>
/* depend(out)->depend(in) on file-scope-visible x: the in task must
   run after the out task, so it must observe x == 2.  */
void
dep (void)
{
int x = 1;
#pragma omp parallel
#pragma omp single
{
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
}
}
/* Same as dep(), but x is local to the single region; taskwait keeps
   x alive until both tasks complete.  */
void
dep2 (void)
{
#pragma omp parallel
#pragma omp single
{
int x = 1;
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp taskwait
}
}
/* Same dependence pair with x declared in the parallel region; the
   implicit barrier at the end of single synchronizes the tasks.  */
void
dep3 (void)
{
#pragma omp parallel
{
int x = 1;
#pragma omp single
{
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
}
}
}
/* No shared clause, so x is firstprivate in the tasks: the second task
   sees the value captured at task creation (1), even though the depend
   clauses order it after the task that sets x = 2.  */
void
firstpriv (void)
{
#pragma omp parallel
#pragma omp single
{
int x = 1;
#pragma omp task depend(out: x)
x = 2;
#pragma omp task depend(in: x)
if (x != 1)
abort ();
}
}
/* Anti-dependence: in-task created first must complete before the
   out-task runs, so it must still observe x == 1.  */
void
antidep (void)
{
int x = 1;
#pragma omp parallel
#pragma omp single
{
#pragma omp task shared(x) depend(in: x)
if (x != 1)
abort ();
#pragma omp task shared(x) depend(out: x)
x = 2;
}
}
/* Anti-dependence with a local x; the taskgroup keeps x alive until
   both tasks finish.  */
void
antidep2 (void)
{
#pragma omp parallel
#pragma omp single
{
int x = 1;
#pragma omp taskgroup
{
#pragma omp task shared(x) depend(in: x)
if (x != 1)
abort ();
#pragma omp task shared(x) depend(out: x)
x = 2;
}
}
}
/* Anti-dependence variant relying on the barrier at the end of the
   single region for lifetime of x.  */
void
antidep3 (void)
{
#pragma omp parallel
{
int x = 1;
#pragma omp single
{
#pragma omp task shared(x) depend(in: x)
if (x != 1)
abort ();
#pragma omp task shared(x) depend(out: x)
x = 2;
}
}
}
/* Output dependence: two out-tasks on x must execute in creation
   order, so after taskwait x must hold the second task's value.  */
void
outdep (void)
{
#pragma omp parallel
#pragma omp single
{
int x = 0;
#pragma omp task shared(x) depend(out: x)
x = 1;
#pragma omp task shared(x) depend(out: x)
x = 2;
#pragma omp taskwait
if (x != 2)
abort ();
}
}
/* One out-task followed by three in-tasks: the in-tasks may run
   concurrently with each other, but all after the out-task.  */
void
concurrent (void)
{
int x = 1;
#pragma omp parallel
#pragma omp single
{
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
}
}
/* Same as concurrent(), with x local to the single region and an
   explicit taskwait to keep it alive.  */
void
concurrent2 (void)
{
#pragma omp parallel
#pragma omp single
{
int x = 1;
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp taskwait
}
}
/* Same as concurrent(), relying on the barrier at the end of single
   for the lifetime of x.  */
void
concurrent3 (void)
{
#pragma omp parallel
{
int x = 1;
#pragma omp single
{
#pragma omp task shared (x) depend(out: x)
x = 2;
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
#pragma omp task shared (x) depend(in: x)
if (x != 2)
abort ();
}
}
}
/* Run every depend-clause scenario; each aborts on failure.  */
int
main ()
{
dep ();
dep2 ();
dep3 ();
firstpriv ();
antidep ();
antidep2 ();
antidep3 ();
outdep ();
concurrent ();
concurrent2 ();
concurrent3 ();
return 0;
}
|
GB_unaryop__lnot_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp32_bool
// op(A') function: GB_tran__lnot_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = !(aij != 0)
// input (A) entry type
#define GB_ATYPE \
bool
// output (C) entry type
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply Cx [p] = (float) !(Ax [p] != 0) for all anz entries, in
// parallel over nthreads.  Returns GrB_NO_VALUE when this kernel is
// compiled out via GB_DISABLE.
GrB_Info GB_unop__lnot_fp32_bool
(
float *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = !(Ax [p] != 0), cast to float (via GB_CAST_OP macros)
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the same operator; the
// actual work is in the included GB_unaryop_transpose.c template,
// driven by the GB_* macros defined above.
GrB_Info GB_tran__lnot_fp32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
symv_x_dia_n_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* y = alpha * A * x + beta * y for a symmetric matrix stored as the
   lower triangle in DIA format.  Each thread accumulates partial sums
   into its own tmp buffer (indexed by thread id) to avoid races, then
   the buffers are reduced into y.
   NOTE(review): malloc results for tmp / tmp[i] are not checked, and
   tmp is allocated with malloc but released with alpha_free — verify
   alpha_free is malloc-compatible.  Assumes alpha_get_thread_id()
   always returns a value < thread_num.  */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
/* One zero-initialized accumulator of length m per thread.  */
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis == 0)
{
/* Main diagonal: contributes once per row.  */
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < m; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + j]);
alpha_madde(tmp[threadId][j], v, x[j]);
}
}
else if(dis < 0)
{
/* Sub-diagonal: by symmetry each stored entry contributes to
both (row, col) and the mirrored (col, row) position.  */
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + row_start + j]);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
alpha_madde(tmp[threadId][col_start + j], v, x[row_start + j]);
}
}
}
/* Reduce: y[i] = beta*y[i] + sum over all per-thread accumulators.  */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
/* Public entry point: dispatches directly to the OpenMP kernel.  */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
dcache.c | #include "papi.h"
#include "caches.h"
#include "timing_kernels.h"
#include "dcache.h"
#include <math.h>
#define _SIZE_SAMPLES_ 40
extern char* eventname;
int min_size, max_size;
/* Drive the data-cache benchmark for one PAPI event: open the per-event
   output file, then sweep access pattern (3 = pointer chase, 4 =
   sequential), stride factor, and pages-per-block, calling
   d_cache_test() for each combination.  Cleanup is via the chained
   error labels at the bottom; the /6 in the progress print matches the
   six (pattern, f, ppb) combinations generated by the loops.  */
void d_cache_driver(char* papi_event_name, int max_iter, hw_desc_t *hw_desc, char* outdir, int latency_only, int mode, int show_progress)
{
int pattern = 3;
int stride, f, cache_line;
int status, test_cnt = 0;
float ppb = 16;
FILE *ofp_papi;
char *sufx, *papiFileName;
// Open file (pass handle to d_cache_test()).
if(CACHE_READ_WRITE == mode){
sufx = strdup(".data.writes");
}else{
sufx = strdup(".data.reads");
}
int l = strlen(outdir)+strlen(papi_event_name)+strlen(sufx);
papiFileName = (char *)calloc( 1+l, sizeof(char) );
if (!papiFileName) {
fprintf(stderr, "Unable to allocate memory. Skipping event %s.\n", papi_event_name);
goto error0;
}
if (l != (sprintf(papiFileName, "%s%s%s", outdir, papi_event_name, sufx))) {
fprintf(stderr, "sprintf error. Skipping event %s.\n", papi_event_name);
goto error1;
}
if (NULL == (ofp_papi = fopen(papiFileName,"w"))) {
fprintf(stderr, "Unable to open file %s. Skipping event %s.\n", papiFileName, papi_event_name);
goto error1;
}
// Fall back to a 64-byte cache line when the hardware description
// does not provide one.
if( (NULL==hw_desc) || (0==hw_desc->dcache_line_size[0]) )
cache_line = 64;
else
cache_line = hw_desc->dcache_line_size[0];
// Print the core to which each thread is pinned.
print_core_affinities(ofp_papi);
// Go through each parameter variant.
for(pattern = 3; pattern <= 4; ++pattern)
{
for(f = 1; f <= 2; f *= 2)
{
stride = cache_line*f;
// PPB variation only makes sense if the pattern is not sequential.
if(pattern != 4)
{
// ppb takes the values 64 and 16.
for(ppb = 64; ppb >= 16; ppb -= 48)
{
if( show_progress )
{
printf("%3d%%\b\b\b\b",(100*test_cnt++)/6);
fflush(stdout);
}
status = d_cache_test(pattern, max_iter, hw_desc, stride, ppb, papi_event_name, latency_only, mode, ofp_papi);
if( status < 0 )
goto error2;
}
}
else
{
if( show_progress )
{
printf("%3d%%\b\b\b\b",(100*test_cnt++)/6);
fflush(stdout);
}
status = d_cache_test(pattern, max_iter, hw_desc, stride, ppb, papi_event_name, latency_only, mode, ofp_papi);
if( status < 0 )
goto error2;
}
}
}
error2:
if( show_progress )
{
size_t i;
printf("100%%");
// Erase the transient progress output.
for(i=0; i<strlen("Total:100% Current test:100%"); i++) putchar('\b');
fflush(stdout);
}
// Close files and free memory.
fclose(ofp_papi);
error1:
free(papiFileName);
error0:
free(sufx);
return;
}
/* Run one benchmark configuration: repeat varyBufferSizes() max_iter
   times, then for each buffer size and thread report the minimum (after
   sorting) latency or event-counter value across iterations.  Returns
   the status of the last run (negative on failure).
   NOTE(review): the malloc calls below are unchecked — verify this is
   acceptable for a benchmark tool.  */
int d_cache_test(int pattern, int max_iter, hw_desc_t *hw_desc, int stride_in_bytes, float pages_per_block, char* papi_event_name, int latency_only, int mode, FILE* ofp){
int i,j,k;
int *values;
double ***rslts, *sorted_rslts;
double ***counter, *sorted_counter;
int status=0, guessCount, ONT;
min_size = 2*1024/sizeof(uintptr_t); // 2KB
max_size = 1024*1024*1024/sizeof(uintptr_t);// 1GB
// The number of different sizes we will guess, trying to find the right size.
guessCount = 0;
if( (NULL==hw_desc) || (hw_desc->cache_levels<=0) ){
for(i=min_size; i<max_size; i*=2){
// += 4 for i, i*1.25, i*1.5, i*1.75
guessCount += 4;
}
}else{
guessCount = _SIZE_SAMPLES_;
}
// Get the number of threads.
ONT = get_thread_count();
// Latency results from the benchmark: [iteration][size][thread].
rslts = (double ***)malloc(max_iter*sizeof(double **));
for(i=0; i<max_iter; ++i){
rslts[i] = (double **)malloc(guessCount*sizeof(double*));
for(j=0; j<guessCount; ++j){
rslts[i][j] = (double *)malloc(ONT*sizeof(double));
}
}
sorted_rslts = (double *)malloc(max_iter*sizeof(double));
// Counter results from the benchmark, same layout.
counter = (double ***)malloc(max_iter*sizeof(double **));
for(i=0; i<max_iter; ++i){
counter[i] = (double **)malloc(guessCount*sizeof(double*));
for(j=0; j<guessCount; ++j){
counter[i][j] = (double *)malloc(ONT*sizeof(double));
}
}
sorted_counter = (double *)malloc(max_iter*sizeof(double));
// List of buffer sizes which are used in the benchmark.
values = (int *)malloc(guessCount*sizeof(int));
// Set the name of the event to be monitored during the benchmark.
eventname = papi_event_name;
for(i=0; i<max_iter; ++i){
status = varyBufferSizes(values, rslts[i], counter[i], hw_desc, stride_in_bytes, pages_per_block, pattern, latency_only, mode, ONT);
if( status < 0 )
goto cleanup;
}
// Sort and print latency and counter results.
fprintf(ofp, "# PTRN=%d, STRIDE=%d, PPB=%f, ThreadCount=%d\n", pattern, stride_in_bytes, pages_per_block, ONT);
if(latency_only) {
for(j=0; j<guessCount; ++j){
fprintf(ofp, "%d", values[j]);
for(k=0; k<ONT; ++k){
for(i=0; i<max_iter; ++i){
sorted_rslts[i] = rslts[i][j][k];
}
// Report the minimum over iterations (least-noisy sample).
qsort(sorted_rslts, max_iter, sizeof(double), compar_lf);
fprintf(ofp, " %.4lf", sorted_rslts[0]);
}
fprintf(ofp, "\n");
}
} else {
for(j=0; j<guessCount; ++j){
fprintf(ofp, "%d", values[j]);
for(k=0; k<ONT; ++k){
for(i=0; i<max_iter; ++i){
sorted_counter[i] = counter[i][j][k];
}
qsort(sorted_counter, max_iter, sizeof(double), compar_lf);
fprintf(ofp, " %lf", sorted_counter[0]);
}
fprintf(ofp, "\n");
}
}
cleanup:
for(i=0; i<max_iter; ++i){
for(j=0; j<guessCount; ++j){
free(rslts[i][j]);
free(counter[i][j]);
}
free(rslts[i]);
free(counter[i]);
}
free(rslts);
free(counter);
free(sorted_rslts);
free(sorted_counter);
free(values);
return status;
}
/* Allocate a per-thread traversal buffer, warm it up, then probe a
   series of buffer sizes (either doubling from min_size to max_size
   with intermediate x1.25/x1.5/x1.75 points, or geometrically spaced
   between L1/8 and 8*LLC/threads when cache sizes are known).  Stores
   the probed sizes in VALUES and the per-size per-thread latency and
   counter results in RSLTS/COUNTER.  Returns 0 on success, -1 on
   allocation or probe failure.  */
int varyBufferSizes(int *values, double **rslts, double **counter, hw_desc_t *hw_desc, int stride_in_bytes, float pages_per_block, int pattern, int latency_only, int mode, int ONT){
int i, j, k, cnt;
long active_buf_len;
int allocErr = 0;
run_output_t out;
int stride = stride_in_bytes/sizeof(uintptr_t);
uintptr_t rslt=42, *v[ONT], *ptr[ONT];
// Allocate memory for each thread to traverse.
#pragma omp parallel private(i) reduction(+:rslt) default(shared)
{
int idx = omp_get_thread_num();
ptr[idx] = (uintptr_t *)malloc( (2*max_size+stride)*sizeof(uintptr_t) );
if( !ptr[idx] ){
fprintf(stderr, "Error: cannot allocate space for experiment.\n");
#pragma omp critical
{
allocErr = -1;
}
}else{
// align v to the stride.
v[idx] = (uintptr_t *)(stride_in_bytes*(((uintptr_t)ptr[idx]+stride_in_bytes)/stride_in_bytes));
// touch every page at least a few times
for(i=0; i<2*max_size; i+=512){
rslt += v[idx][i];
}
}
}
if(allocErr != 0)
{
goto error;
}
// Make a cold run
out = probeBufferSize(16*stride, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
// Run the actual experiment
if( (NULL==hw_desc) || (hw_desc->cache_levels<=0) ){
cnt = 0;
// If we don't know the cache sizes, space the measurements between two default values.
for(active_buf_len=min_size; active_buf_len<max_size; active_buf_len*=2){
out = probeBufferSize(active_buf_len, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
for(k = 0; k < ONT; ++k) {
rslts[cnt][k] = out.dt[k];
counter[cnt][k] = out.counter[k];
}
values[cnt++] = ONT*sizeof(uintptr_t)*active_buf_len;
out = probeBufferSize((int)((double)active_buf_len*1.25), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
for(k = 0; k < ONT; ++k) {
rslts[cnt][k] = out.dt[k];
counter[cnt][k] = out.counter[k];
}
values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.25));
out = probeBufferSize((int)((double)active_buf_len*1.5), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
for(k = 0; k < ONT; ++k) {
rslts[cnt][k] = out.dt[k];
counter[cnt][k] = out.counter[k];
}
values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.5));
out = probeBufferSize((int)((double)active_buf_len*1.75), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
for(k = 0; k < ONT; ++k) {
rslts[cnt][k] = out.dt[k];
counter[cnt][k] = out.counter[k];
}
values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.75));
}
}else{
int llc;
double f, small_size, large_size, curr_size;
// If we know the cache sizes, space the measurements between a buffer size equal to L1/8
// and a buffer size that all threads cumulatively will exceed the LLC by a factor of 8.
// The rationale is that the L1 is typically private, while the LLC is shared among all cores.
llc = hw_desc->dcache_size[hw_desc->cache_levels-1];
small_size = hw_desc->dcache_size[0]/8;
large_size = (double)llc;
large_size = 8*large_size/ONT;
// Choose a factor "f" to grow the buffer size by, such that we collect "_SIZE_SAMPLES_"
// number of samples between "small_size" and "large_size", evenly distributed
// in a geometric fashion (i.e., sizes will be equally spaced in a log graph).
f = pow(large_size/small_size, 1.0/(_SIZE_SAMPLES_-1));
curr_size = small_size;
cnt=0;
for(j=0; j<_SIZE_SAMPLES_; j++){
active_buf_len = (long)(curr_size/sizeof(uintptr_t));
out = probeBufferSize(active_buf_len, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT);
if(out.status != 0)
goto error;
for(k = 0; k < ONT; ++k) {
rslts[cnt][k] = out.dt[k];
counter[cnt][k] = out.counter[k];
}
values[cnt++] = sizeof(uintptr_t)*active_buf_len;
curr_size *= f;
}
}
// Free each thread's memory.
for(j=0; j<ONT; ++j){
free(ptr[j]);
}
return 0;
error:
// Free each thread's memory.
for(j=0; j<ONT; ++j){
free(ptr[j]);
}
return -1;
}
/* Return the size of the default OpenMP thread team.  The master
   thread (id 0) records the team size from inside a parallel region;
   with OpenMP disabled this simply returns 1.  */
int get_thread_count() {
    int nthreads = 1;
    #pragma omp parallel default(shared)
    {
        if (omp_get_thread_num() == 0) {
            nthreads = omp_get_num_threads();
        }
    }
    return nthreads;
}
/* Write a "# Core:" header line to OFP listing, per thread id, the CPU
   each OpenMP thread is currently running on (via sched_getcpu()).
   On allocation failure it prints a message and writes nothing.  */
void print_core_affinities(FILE *ofp) {
int k, ONT;
int *pinnings = NULL;
// Get the number of threads.
ONT = get_thread_count();
// List of core affinities in which the index is the thread ID.
pinnings = (int *)malloc(ONT*sizeof(int));
if( NULL == pinnings ) {
fprintf(stderr, "Error: cannot allocate space for experiment.\n");
return;
}
#pragma omp parallel default(shared)
{
int idx = omp_get_thread_num();
pinnings[idx] = sched_getcpu();
}
fprintf(ofp, "# Core:");
for(k=0; k<ONT; ++k) {
fprintf(ofp, " %d", pinnings[k]);
}
fprintf(ofp, "\n");
free(pinnings);
return;
}
|
mpncra.c | /* $Header$ */
/* This single source file may be called as three separate executables:
ncra -- netCDF record averager
nces -- netCDF ensemble statistics
ncrcat -- netCDF record concatenator */
/* Purpose: Compute averages or extract series of specified hyperslabs of
specfied variables of multiple input netCDF files and output them
to a single file. */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncra -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
ncra -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
ncra -n 3,4,1 -p /ZENDER/tmp -l ${HOME}/nco/data h0001.nc ~/foo.nc
scp ~/nco/src/nco/ncra.c esmf.ess.uci.edu:nco/src/nco
nces in.nc in.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
nces -n 3,4,1 -p /ZENDER/tmp -l ${HOME} h0001.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <assert.h> /* assert() debugging macro */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* atof, atoi, malloc, getopt */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* Internationalization i18n, Linux Journal 200211 p. 57--59 */
#ifdef I18N
#include <libintl.h> /* Internationalization i18n */
#include <locale.h> /* Locale setlocale() */
#define _(sng) gettext (sng)
#define gettext_noop(sng) (sng)
#define N_(sng) gettext_noop(sng)
#endif /* I18N */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
#include <mpi.h> /* MPI definitions */
#include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
#ifdef ENABLE_MPI
/* Purpose: Synchronize all MPI ranks at a named stage. The manager rank
   (rnk_mgr) broadcasts the stage number; workers verify that the value they
   receive matches the stage they expect, so a mismatch trips an assertion.
   NB: behaves like a barrier because MPI_Bcast is collective. */
void checkpointMpi(int prc_rnk, int stage)
{
  int msg[2];
  int rcd; /* [rcd] Return code */
  FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
  msg[0]=0;
  msg[1]=0;
  /* Only the manager seeds the payload; workers receive it via broadcast */
  if(prc_rnk == rnk_mgr) msg[0]=msg[1]=stage;
  (void)fprintf(fp_stderr,"%d checkpointing at stage %d\n",prc_rnk,stage);
  /* Collective broadcast: every rank resumes from this point together */
  rcd=MPI_Bcast(msg,2,MPI_INT,rnk_mgr,MPI_COMM_WORLD);
  if(prc_rnk != rnk_mgr){
    /* Workers sanity-check that the broadcast stage matches their own */
    assert(msg[0] == stage);
    assert(msg[1] == stage);
  } /* !prc_rnk */
} /* !checkpointMpi() */
#endif /* !ENABLE_MPI */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_op_typ_sng=NULL_CEWI; /* [sng] Operation type Option y */
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FHhL:l:n:Oo:p:P:rRSt:v:xY:y:-:";
dmn_sct **dim;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
int *in_id_arr;
int abb_arg_nbr=0;
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_dmn_xtr;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int nco_op_typ=nco_op_avg; /* [enm] Default operation is averaging */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Default packing is none */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int rec_dmn_id=NCO_REC_DMN_UNDEFINED;
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **lmt=NULL_CEWI;
lmt_sct *lmt_rec=NULL_CEWI;
lmt_all_sct **lmt_all_lst; /* List of *lmt_all structures */
lmt_all_sct *lmt_all_rec=NULL_CEWI; /* Pointer to record limit structure in above list */
long idx_rec; /* [idx] Index of current record in current input file */
long rec_usd_cml=0L; /* [idx] Index of current record in output file (0 is first, ...) */
nco_bool CNV_ARM;
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_APPEND=True; /* Option H */
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool LAST_RECORD=False;
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nco_int base_time_srt=nco_int_CEWI;
nco_int base_time_crr=nco_int_CEWI;
nm_id_sct *dmn_lst;
nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out=NULL_CEWI;
var_sct **var_prc;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int jdx=0; /* [idx] MPI index for local variables */
int lcl_idx_lst[60]; /* [arr] Array containing indices of variables processed at each Worker */
int lcl_nbr_var=0; /* [nbr] Count of variables processes at each Worker */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rnk=0; /* [idx] Rank of process holding write token */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
{"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
{"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"dbg_lvl",required_argument,0,'D'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"fl_lst_in",no_argument,0,'H'},
{"file_list",no_argument,0,'H'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"nintap",required_argument,0,'n'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"path",required_argument,0,'p'},
{"pack",required_argument,0,'P'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"variable",required_argument,0,'v'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"pseudonym",required_argument,0,'Y'},
{"program",required_argument,0,'Y'},
{"prg_nm",required_argument,0,'Y'},
{"math",required_argument,0,'y'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef _LIBINTL_H
setlocale(LC_ALL,""); /* LC_ALL sets all localization tokens to same value */
bindtextdomain("nco","/home/zender/share/locale"); /* ${LOCALEDIR} is e.g., /usr/share/locale */
/* MO files should be in ${LOCALEDIR}/es/LC_MESSAGES */
textdomain("nco"); /* PACKAGE is name of program */
#endif /* not _LIBINTL_H */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr);
MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start clock and save command line */
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'H': /* Toggle writing input file list attribute */
FL_LST_IN_APPEND=!FL_LST_IN_APPEND;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'n': /* NINTAP-style abbreviation of files to average */
fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr);
if(abb_arg_nbr < 1 || abb_arg_nbr > 6){
(void)fprintf(stdout,gettext("%s: ERROR Incorrect abbreviation for file list\n"),nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
} /* end if */
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stderr,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm_get());
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case 'Y': /* Pseudonym */
/* Call nco_prg_prs to reset pseudonym */
optarg_lcl=(char *)strdup(optarg);
if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm);
nco_prg_nm=nco_prg_prs(optarg_lcl,&nco_prg_id);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'y': /* Operation type */
nco_op_typ_sng=(char *)strdup(optarg);
if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge) nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
if(lmt_nbr > 0) lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get number of variables, dimensions, and record dimension ID of input file */
(void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id);
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv);
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr);
/* Fill-in dimension structure for all extracted dimensions */
dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm);
/* Dimension list no longer needed */
dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr);
/* Merge hyperslab limit information into dimension structures */
if(nbr_dmn_fl > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr,lmt_all_lst,nbr_dmn_fl);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Create stand-alone limit structure just for record dimension */
if(rec_dmn_id == NCO_REC_DMN_UNDEFINED){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
(void)fprintf(stdout,gettext("%s: ERROR input file %s lacks a record dimension\n"),nco_prg_nm_get(),fl_in);
if(fl_nbr == 1)(void)fprintf(stdout,gettext("%s: HINT Use ncks instead of %s\n"),nco_prg_nm_get(),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* endif */
}else{ /* Record dimension exists */
lmt_rec=nco_lmt_sct_mk(in_id,rec_dmn_id,lmt,lmt_nbr,FORTRAN_IDX_CNV);
/* Initialize record coordinate re-basing */
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
int var_id;
lmt_rec->cln_typ=cln_nil;
lmt_rec->origin=0.0;
lmt_rec->rbs_sng=NULL;
/* Obtain metadata for record coordinate */
rcd=nco_inq_varid_flg(in_id,lmt_rec->nm,&var_id);
if(rcd == NC_NOERR){
char *cln_att_sng=NULL;
lmt_rec->rbs_sng=nco_lmt_get_udu_att(in_id,var_id,"units");
cln_att_sng=nco_lmt_get_udu_att(in_id,var_id,"calendar");
lmt_rec->cln_typ=nco_cln_get_cln_typ(cln_att_sng);
if(cln_att_sng) cln_att_sng=(char*)nco_free(cln_att_sng);
}else{ /* endif record coordinate exists */
/* Record dimension, but not record coordinate, exists, which is fine. Reset return code. */
rcd=NC_NOERR;
} /* endif record coordinate exists */
} /* endif ncra, ncrcat */
} /* endif record dimension exists */
if(rec_dmn_id != NCO_REC_DMN_UNDEFINED){
for(idx=0;idx<nbr_dmn_fl;idx++){
if(!strcmp(lmt_rec->nm,lmt_all_lst[idx]->dmn_nm)){
lmt_all_rec=lmt_all_lst[idx];
/* Can only have one record limit */
if(lmt_all_rec->lmt_dmn_nbr > 1L){
(void)fprintf(stdout,"%s: Although this program allows multiple hyperslab limits for a single dimension, it allows only one unwrapped limit for the record dimension \"%s\". You have specified %i.\n",nco_prg_nm_get(),lmt_all_rec->dmn_nm,lmt_all_rec->lmt_dmn_nbr);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id==ncra || nco_prg_id==ncrcat){
/* Change record dim in lmt_all_lst so that cnt=1 */
lmt_all_lst[idx]->dmn_cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srt=0L;
lmt_all_lst[idx]->lmt_dmn[0]->end=0L;
lmt_all_lst[idx]->lmt_dmn[0]->cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srd=1L;
} /* endif ncra || ncrcat */
break;
} /* endif current limit applies to record dimension */
} /* end loop over all dimensions */
} /* end if file has record dimension */
/* Is this an ARM-format data file? */
CNV_ARM=nco_cnv_arm_inq(in_id);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_srt=nco_cnv_arm_base_time_get(in_id);
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr);
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
(void)nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,MPI_COMM_WORLD);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,MPI_COMM_WORLD);
#endif /* !ENABLE_MPI */
/* Pre-processor token spaghetti here is necessary so that
1. UP/SMP/MPI codes all zero srt vectors before calling nco_var_val_cpy()
2. No codes zero srt vectors more than once */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
/* (void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix); */
(void)nco_msa_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix,lmt_all_lst,nbr_dmn_fl);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#else /* !ENABLE_MPI */
/* Close first input netCDF file (SMP only since MPI code immediate re-opens) */
(void)nco_close(in_id);
#endif /* !ENABLE_MPI */
/* Allocate and, if necessary, initialize accumulation space for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
/* Allocate space for only one record */
var_prc_out[idx]->sz=var_prc[idx]->sz=var_prc[idx]->sz_rec;
} /* endif */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_malloc(var_prc_out[idx]->sz*sizeof(long int));
(void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally);
var_prc_out[idx]->val.vp=(void *)nco_malloc(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type));
(void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val);
} /* end if */
} /* end loop over idx */
#ifdef ENABLE_MPI
/* NB: Only manager code manipulates value of TKN_WRT_FREE
Pass 1: Workers construct local persistent variable lists
Open first file
mpncra and mpncrcat process first record only
mpnces ingests complete file
Workers create local list of their variables
Pass 2: Complete record/file loops with local variable lists
Workers skip first timestep (mpncra/mpncrcat)
Workers process only variables in their local list from Pass 1
This variable persistence is necessary for mpncra and mpnces
since their workers must maintain running tallies for each variable.
Variable persistence is not necessary for mpncrcat
However, we do it anyway to keep mpncrcat and mpncra similar
mpncrcat writes records as it reads them and finishes after pass 2
Pass 3:
mpnces and mpncra require a final loop to normalize and write
Write-token for this loop is passed sequentially through the ranks */
/* Begin Pass 1: Workers construct local persistent variable lists */
fl_idx=0;
/* Variables may have different ID, missing_value, type, in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
idx_rec=lmt_rec->srt;
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
/* Process all variables in first record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive any message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
if(nco_prg_id == ncrcat) TKN_WRT_FREE=True; /* File written to at this point only for ncrcat */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
/* csz: fxm Workers do not need to know Master's out_id */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs && nco_prg_id == ncrcat){ /* msg_tag_typ != msg_tag_wrk_rqs */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
/* csz: fxm delete redundant statement with two lines further down */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
/* csz: fxm dangerous---workers must get and use their own out_id's, not master's out_id */
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass){
break;
}else{ /* idx != idx_all_wrk_ass */
/* Assign this variable to this worker for rest of program */
lcl_idx_lst[lcl_nbr_var]=idx;
/* csz: got to here reading logic */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* !ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
if(var_prc_out[idx]->sz_rec > 1L) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to unpacked type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
} /* endif Worker */
printf("DEBUG: End of first pass of ncra/ncrcat at node %d\n",prc_rnk);
/* End of ncra, ncrcat section */
}else{ /* ncfe */
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
/* TKN_WRT_FREE=True; ncfe does not do file write here */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_wrk_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
lcl_idx_lst[lcl_nbr_var]=idx; /* storing the indices for subsequent processing by the worker */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
} /* endif Worker */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
nco_close(in_id);
#ifdef ENABLE_MPI
/* This barrier ensures that all nodes have reached this point together.
Otherwise, the manager code should be altered so it can deal with
nodes in different stages of execution at any time.
Daniel: I think we should be convinced of this parallelization
structure before bothering with implementing the code restructuring in
the manager that would let us remove the barrier. The barrier
should only negligibly impact performance. */
checkpointMpi(prc_rnk, 1);
#endif /* ENABLE_MPI */
/* End Pass 1: Workers construct local persistent variable lists */
printf("DEBUG: prc_rnk %d is done with 1st pass\n",prc_rnk);
/* Begin Pass 2: Complete record/file loops with local variable lists */
#endif /* !ENABLE_MPI */
/* Loop over input files */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("\nInput file %d is %s; "),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("local file %s:\n"),fl_in);
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
in_id=in_id_arr[0];
#ifdef ENABLE_MPI
printf("DEBUG: input file opened in prc_rnk %d inside the loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Variables may have different IDs and missing_values in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
for(idx_rec=lmt_rec->srt;idx_rec<=lmt_rec->end;idx_rec+=lmt_rec->srd){
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
#ifdef ENABLE_MPI
if(fl_idx == 0 && idx_rec == lmt_rec->srt){
/* MPI operators processed first record in first-stage loop */
continue;
}else{ /* a loop of idx = stored indices */
if(prc_rnk == rnk_mgr){ /* For ncrcat, Manager gives write access for each record in each file */
if(nco_prg_id == ncrcat){ /* Give Write access to write current record */
/* var_wrt_nbr=-prc_nbr+1; */
var_wrt_nbr=0;
while(var_wrt_nbr < nbr_var_prc){ /* Give write access to Workers who have some variables; wrong condn? */
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
if(msg_tag_typ == msg_tag_wrk_done) TKN_WRT_FREE=True;
if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
if(rnk_wrk == tkn_wrt_rnk){ /* Prev write completed */
TKN_WRT_FREE=True;
} /* rnk_wrk != tkn_wrt_rnk */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
tkn_wrt_rnk=rnk_wrk; /* To track who has the token */
var_wrt_nbr++;
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* End-while token request loop */
} /* !ncrcat */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
/* if(fl_idx == 0 && idx_rec == lmt_rec->srt) continue;
else a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#endif /* !ENABLE_MPI */
/* Process all variables in current record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
#if 0
/* NB: Immediately preceding MPI for scope confounds Emacs indentation
Fake end scope restores correct indentation, simplifies code-checking */
} /* fake end for */
#endif /* !0 */
#ifndef ENABLE_MPI
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(CNV_ARM,base_time_crr,base_time_srt,nco_dbg_lvl,fl_in,fl_out,idx_rec,rec_usd_cml,in_id_arr,LAST_RECORD,nbr_var_prc,nco_op_typ,out_id,prg,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
/* UP and SMP codes main loop over variables */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ);
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* end if ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
if(var_prc_out[idx]->sz_rec > 1) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
#endif /* !ENABLE_MPI */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to disk type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP Parallel for) loop over variables */
#ifdef ENABLE_MPI
if(nco_prg_id == ncrcat){
/* Return token after writing record's last variable */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_done,MPI_COMM_WORLD);
} /* !ncrcat */
#endif /* !ENABLE_MPI */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
#ifdef ENABLE_MPI
} /* !Worker */
} /* end else ! fl_idx=0,idx_rec=srt */
#endif /* !ENABLE_MPI */
} /* end loop over idx_rec */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker */
#endif /* !ENABLE_MPI */
/* Warn if fewer than number of requested records were read and final file has been processed */
if(lmt_rec->lmt_typ == lmt_dmn_idx && lmt_rec->is_usr_spc_min && lmt_rec->is_usr_spc_max){
long rec_nbr_rqs; /* Number of records user requested */
rec_nbr_rqs=1L+(lmt_rec->max_idx-lmt_rec->min_idx)/lmt_rec->srd;
if(nco_dbg_lvl >= nco_dbg_std && fl_idx == fl_nbr-1 && rec_nbr_rqs != rec_usd_cml) (void)fprintf(stdout,gettext("%s: WARNING User requested %li records but only %li were found\n"),nco_prg_nm_get(),rec_nbr_rqs,rec_usd_cml);
} /* end if */
/* Error if no records were read and final file has been processed */
if(rec_usd_cml <= 0 && fl_idx == fl_nbr-1){
(void)fprintf(stdout,gettext("%s: ERROR No records lay within specified hyperslab\n"),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end if */
#ifdef ENABLE_MPI
} /* !Worker */
printf("DEBUG: prc_rnk %d at the end of ncra/rcat\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* End of ncra, ncrcat section */
}else{ /* ncfe */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker does the ncfe processing */
if(fl_idx == 0){
continue;
}else{ /* a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(nco_dbg_lvl,fl_idx,in_id_arr,nbr_var_prc,nco_op_typ,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */
#endif /* !ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
#ifdef ENABLE_MPI
} /* end else !fl_idx=0 */
} /* !Worker */
#endif /* !ENABLE_MPI */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Dispose local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
#ifdef ENABLE_MPI
printf("DEBUG: prc_rnk %d is out of file idx loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Normalize, multiply, etc where necessary */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only workers have indices of variables to process */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#if 0
/* NB: Immediately preceding MPI if/for scopes confound Emacs indentation
Fake end scopes restore correct indentation, simplify code-checking */
} /* fake end for */
} /* fake end if */
#endif /* !0 */
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx) shared(nbr_var_prc,nco_op_typ,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* !ENABLE_MPI */
if(var_prc[idx]->is_crd_var){
/* Return linear averages of coordinates unless computing extrema
Prevent coordinate variables from encountering nco_var_nrm_sdn() */
if((nco_op_typ != nco_op_min) && (nco_op_typ != nco_op_max)) (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
}else{ /* !var_prc[idx]->is_crd_var */
switch(nco_op_typ){
case nco_op_avg: /* Normalize sum by tally to create mean */
case nco_op_sqrt: /* Normalize sum by tally to create mean */
case nco_op_sqravg: /* Normalize sum by tally to create mean */
case nco_op_rms: /* Normalize sum of squares by tally to create mean square */
case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */
(void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */
(void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_min: /* Minimum is already in buffer, do nothing */
case nco_op_max: /* Maximum is already in buffer, do nothing */
case nco_op_ttl: /* Total is already in buffer, stuff missing values into elements with zero tally */
(void)nco_var_tll_zro_mss_val(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
default:
break;
} /* end switch */
/* Some operations require additional processing */
switch(nco_op_typ){
case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */
case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */
case nco_op_sqrt: /* Take root of mean to create root mean */
(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */
(void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
default:
break;
} /* end switch */
} /* !var_prc[idx]->is_crd_var */
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
} /* end (OpenMP parallel for) loop over variables */
#ifdef ENABLE_MPI
printf("DEBUG: End of Normzn at prc_rnk %d\n",prc_rnk);
} /* prc_rnk == rnk_mgr */
for(idx = 0; idx < nbr_var_prc; idx++) {
assert(var_prc_out[idx]->tally == var_prc[idx]->tally);
if (var_prc_out[idx]->tally == 0) continue;
printf("DEBUG: node %d reset idx %d tally for var_prc(out) (cleanup)\n",prc_rnk,idx);
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
}
printf("DEBUG: Mgr shud prnt this too, prc_rnk %d\n",prc_rnk);
#endif /* !ENABLE_MPI */
} /* !ncra/ncfe */
#ifdef ENABLE_MPI
printf("DEBUG: After all processing; Before barrier, prc_rnk %d\n",prc_rnk);
if(prc_rnk == rnk_mgr){ /* Only Manager */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened out file\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Manually fix YYMMDD date which was mangled by averaging */
if(cnv->CCM_CCSM_CF && nco_prg_id == ncra) (void)nco_cnv_ccm_ccsm_cf_date(out_id,var_out,xtr_nbr);
/* End Pass 2: Complete record/file loops with local variable lists */
/* Begin Pass 3: */
/* End Pass 3: */
/* Add time variable to output file
NB: nco_cnv_arm_time_install() contains OpenMP critical region */
if(CNV_ARM && nco_prg_id == ncrcat) (void)nco_cnv_arm_time_install(out_id,base_time_srt,dfl_lvl);
#ifdef ENABLE_MPI
nco_close(out_id);
printf("DEBUG: Mgr prc_rnk %d closed out file %d after fixing date, time \n", prc_rnk, out_id);
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
printf("DEBUG: Mgr sent token to worker 1 for final write\n");
}else{ /* Workers */
printf("DEBUG: prc_rnk %d waiting for msg from Mgr for final write\n",prc_rnk);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
printf("DEBUG: prc_rnk %d got token for final write to %d\n",prc_rnk, out_id);
if(nco_prg_id == ncra || nco_prg_id == ncfe){
/* Copy averages to output file and free averaging buffers */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened output file for final write\n",prc_rnk);
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
/* Revert any arithmetic promotion but leave unpacked (for now) */
/* printf("DEBUG: Before nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* printf("DEBUG: After nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
printf("DEBUG: prc_rnk %d to final write var %s with idx %d val %g\n",prc_rnk,var_prc_out[idx]->nm,idx,var_prc_out[idx]->val.fp[0]);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is one in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over jdx */
/* Close output file */
nco_close(out_id);
printf("DEBUG: prc_rnk %d closed out file after writing\n",prc_rnk);
/* Send Token to Manager */
} /* end if */
if(prc_rnk == prc_nbr-1) MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); else MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* !Workers */
if(prc_rnk == rnk_mgr){ /* Only Manager */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_nbr-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
(void)nco_fl_mv(fl_out_tmp,fl_out);
} /* !Manager */
MPI_Finalize();
#else /* !ENABLE_MPI */
/* Copy averages to output file and free averaging buffers */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
for(idx=0;idx<nbr_var_prc;idx++){
/* Revert any arithmetic promotion but leave unpacked (for now) */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is 1 in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over idx */
} /* end if */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* !ENABLE_MPI */
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncra-specific memory cleanup */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) lmt_rec=nco_lmt_free(lmt_rec);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
if(lmt_nbr > 0) lmt=nco_lmt_lst_free(lmt,lmt_nbr);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr);
if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr);
#if 1
/* Free variable lists */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
#endif /* !1 */
#if 0
/* 20051027: Try ncwa free()'ing technique to avoid freeing dangling pointers */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
/* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */
if(nbr_var_fix > 0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix);
if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc);
var_prc=(var_sct **)nco_free(var_prc);
var_fix=(var_sct **)nco_free(var_fix);
var_out=(var_sct **)nco_free(var_out);
#endif /* !0 */
} /* !flg_mmr_cln */
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
gimplify.c | /* Tree lowering pass. This pass converts the GENERIC functions-as-trees
tree representation into the GIMPLE form.
Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
Free Software Foundation, Inc.
Major work done by Sebastian Pop <s.pop@laposte.net>,
Diego Novillo <dnovillo@redhat.com> and Jason Merrill <jason@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "varray.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "diagnostic.h"
#include "langhooks.h"
#include "langhooks-def.h"
#include "tree-flow.h"
#include "cgraph.h"
#include "timevar.h"
#include "except.h"
#include "hashtab.h"
#include "flags.h"
#include "real.h"
#include "function.h"
#include "output.h"
#include "expr.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "optabs.h"
#include "pointer-set.h"
#include "splay-tree.h"
/* Flags describing how a variable is used within an OpenMP construct.
   These are OR-ed together and stored per-DECL in each context's
   VARIABLES splay tree (see struct gimplify_omp_ctx below).  */
enum gimplify_omp_var_data
{
  GOVD_SEEN = 1,		/* Variable was referenced in the construct.  */
  GOVD_EXPLICIT = 2,		/* Appeared explicitly in a data clause.  */
  GOVD_SHARED = 4,
  GOVD_PRIVATE = 8,
  GOVD_FIRSTPRIVATE = 16,
  GOVD_LASTPRIVATE = 32,
  GOVD_REDUCTION = 64,
  GOVD_LOCAL = 128,		/* Declared locally inside the construct.  */
  GOVD_DEBUG_PRIVATE = 256,
  /* Mask selecting the data-sharing classification bits.  */
  GOVD_DATA_SHARE_CLASS = (GOVD_SHARED | GOVD_PRIVATE | GOVD_FIRSTPRIVATE
			   | GOVD_LASTPRIVATE | GOVD_REDUCTION | GOVD_LOCAL)
};
/* Gimplification state for one OpenMP construct; contexts nest via
   OUTER_CONTEXT, innermost first.  */
struct gimplify_omp_ctx
{
  struct gimplify_omp_ctx *outer_context;  /* Enclosing construct, or NULL.  */
  splay_tree variables;			   /* DECL -> gimplify_omp_var_data
					      flags, keyed by DECL_UID.  */
  struct pointer_set_t *privatized_types;  /* Types already privatized in
					      this context.  */
  location_t location;			   /* Location of the directive.  */
  enum omp_clause_default_kind default_kind; /* default() clause setting.  */
  bool is_parallel;			   /* True for a parallel region.  */
  bool is_combined_parallel;		   /* True for combined constructs,
					      e.g. "parallel for".  */
};
/* Gimplification state for one function body; contexts nest via
   PREV_CONTEXT when nested functions are gimplified.  */
struct gimplify_ctx
{
  struct gimplify_ctx *prev_context;	/* Saved outer context.  */
  tree current_bind_expr;		/* Innermost enclosing BIND_EXPR,
					   chained through TREE_CHAIN.  */
  tree temps;				/* Chain of temporaries created so
					   far in this context.  */
  tree conditional_cleanups;		/* Cleanups seen inside COND_EXPRs,
					   flushed when leaving the outermost
					   conditional.  */
  tree exit_label;
  tree return_temp;			/* Shared temporary for return
					   values (see gimplify_return_expr).  */
  VEC(tree,heap) *case_labels;
  /* The formal temporary table.  Should this be persistent?  */
  htab_t temp_htab;
  int conditions;			/* Depth of nested COND_EXPRs.  */
  bool save_stack;			/* A VLA bind needs stack save/restore.  */
  bool into_ssa;
  bool allow_rhs_cond_expr;
};
/* Current (innermost) gimplification context, or NULL when not
   gimplifying.  */
static struct gimplify_ctx *gimplify_ctxp;
/* Current (innermost) OpenMP context, or NULL outside any construct.  */
static struct gimplify_omp_ctx *gimplify_omp_ctxp;
/* Formal (expression) temporary table handling: Multiple occurrences of
   the same scalar expression are evaluated into the same temporary.  */
typedef struct gimple_temp_hash_elt
{
  tree val;   /* Key */
  tree temp;  /* Value */
} elt_t;
/* Forward declarations.  */
static enum gimplify_status gimplify_compound_expr (tree *, tree *, bool);
/* Mark X addressable.  Unlike the langhook we expect X to be in gimple
   form and we don't do any syntax checking.  */

static void
mark_addressable (tree x)
{
  tree base = x;

  /* Strip component references to reach the underlying object.  */
  while (handled_component_p (base))
    base = TREE_OPERAND (base, 0);

  /* Only variables and parameters can usefully carry the bit.  */
  if (TREE_CODE (base) == VAR_DECL || TREE_CODE (base) == PARM_DECL)
    TREE_ADDRESSABLE (base) = 1;
}
/* Return a hash value for a formal temporary table entry. */
static hashval_t
gimple_tree_hash (const void *p)
{
tree t = ((const elt_t *) p)->val;
return iterative_hash_expr (t, 0);
}
/* Compare two formal temporary table entries P1 and P2; nonzero iff
   their key expressions are structurally equal.  */

static int
gimple_tree_eq (const void *p1, const void *p2)
{
  tree t1 = ((const elt_t *) p1)->val;
  tree t2 = ((const elt_t *) p2)->val;

  /* Differing codes or types can never compare equal.  */
  if (TREE_CODE (t1) != TREE_CODE (t2)
      || TREE_TYPE (t1) != TREE_TYPE (t2))
    return 0;

  if (!operand_equal_p (t1, t2, 0))
    return 0;

  /* Only allow them to compare equal if they also hash equal; otherwise
     results are nondeterminate, and we fail bootstrap comparison.  */
  gcc_assert (gimple_tree_hash (p1) == gimple_tree_hash (p2));

  return 1;
}
/* Set up a context for the gimplifier.  */

void
push_gimplify_context (void)
{
  struct gimplify_ctx *ctx
    = (struct gimplify_ctx *) xcalloc (1, sizeof (struct gimplify_ctx));

  /* Chain the new context on top of the current one.  */
  ctx->prev_context = gimplify_ctxp;

  /* The formal-temporary table is only useful when optimizing, since
     lookup_tmp_var never reuses temporaries at -O0.  */
  if (optimize)
    ctx->temp_htab
      = htab_create (1000, gimple_tree_hash, gimple_tree_eq, free);

  gimplify_ctxp = ctx;
}
/* Tear down a context for the gimplifier.  If BODY is non-null, then
   put the temporaries into the outer BIND_EXPR.  Otherwise, put them
   in the unexpanded_var_list.  */

void
pop_gimplify_context (tree body)
{
  struct gimplify_ctx *ctx = gimplify_ctxp;
  tree t;

  gcc_assert (ctx && !ctx->current_bind_expr);
  gimplify_ctxp = ctx->prev_context;

  /* The temporaries stop being formal temps once this context ends.  */
  for (t = ctx->temps; t; t = TREE_CHAIN (t))
    DECL_GIMPLE_FORMAL_TEMP_P (t) = 0;

  if (body)
    declare_vars (ctx->temps, body, false);
  else
    record_vars (ctx->temps);

  /* The table only exists when optimizing; see push_gimplify_context.  */
  if (optimize)
    htab_delete (ctx->temp_htab);

  free (ctx);
}
/* Push BIND_EXPR onto the stack of enclosing binds, which is chained
   through TREE_CHAIN.  */

static void
gimple_push_bind_expr (tree bind_expr)
{
  TREE_CHAIN (bind_expr) = gimplify_ctxp->current_bind_expr;
  gimplify_ctxp->current_bind_expr = bind_expr;
}
static void
gimple_pop_bind_expr (void)
{
gimplify_ctxp->current_bind_expr
= TREE_CHAIN (gimplify_ctxp->current_bind_expr);
}
/* Return the innermost BIND_EXPR currently being gimplified, or
   NULL_TREE when none has been pushed (the context is xcalloc'd).  */
tree
gimple_current_bind_expr (void)
{
  return gimplify_ctxp->current_bind_expr;
}
/* Returns true iff there is a COND_EXPR between us and the innermost
   CLEANUP_POINT_EXPR.  This info is used by gimple_push_cleanup.  */
static bool
gimple_conditional_context (void)
{
  /* CONDITIONS counts the COND_EXPR nesting depth; see
     gimple_push_condition / gimple_pop_condition.  */
  return gimplify_ctxp->conditions > 0;
}
/* Note that we've entered a COND_EXPR.  */

static void
gimple_push_condition (void)
{
#ifdef ENABLE_CHECKING
  /* On entry to the outermost conditional, no conditional cleanups may
     be pending yet.  */
  if (gimplify_ctxp->conditions == 0)
    gcc_assert (!gimplify_ctxp->conditional_cleanups);
#endif
  gimplify_ctxp->conditions++;
}
/* Note that we've left a COND_EXPR.  If we're back at unconditional scope
   now, add any conditional cleanups we've seen to the prequeue.  */

static void
gimple_pop_condition (tree *pre_p)
{
  gimplify_ctxp->conditions--;
  gcc_assert (gimplify_ctxp->conditions >= 0);

  /* Back at unconditional scope: flush the accumulated cleanups.  */
  if (gimplify_ctxp->conditions == 0)
    {
      append_to_statement_list (gimplify_ctxp->conditional_cleanups, pre_p);
      gimplify_ctxp->conditional_cleanups = NULL_TREE;
    }
}
/* A stable comparison routine for use with splay trees and DECLs.  */

static int
splay_tree_compare_decl_uid (splay_tree_key xa, splay_tree_key xb)
{
  /* DECL_UIDs are unique per decl, so their difference gives a stable
     total order.  */
  return DECL_UID ((tree) xa) - DECL_UID ((tree) xb);
}
/* Create a new omp construct that deals with variable remapping.  */

static struct gimplify_omp_ctx *
new_omp_context (bool is_parallel, bool is_combined_parallel)
{
  struct gimplify_omp_ctx *ctx = XCNEW (struct gimplify_omp_ctx);

  /* Link under the current context and set up the per-construct lookup
     structures.  */
  ctx->outer_context = gimplify_omp_ctxp;
  ctx->variables = splay_tree_new (splay_tree_compare_decl_uid, 0, 0);
  ctx->privatized_types = pointer_set_create ();
  ctx->location = input_location;
  ctx->is_parallel = is_parallel;
  ctx->is_combined_parallel = is_combined_parallel;
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;

  return ctx;
}
/* Destroy an omp construct that deals with variable remapping.  */

static void
delete_omp_context (struct gimplify_omp_ctx *ctx)
{
  /* Release the lookup structures first, then the context itself.  */
  splay_tree_delete (ctx->variables);
  pointer_set_destroy (ctx->privatized_types);
  XDELETE (ctx);
}
/* Forward declarations of the OpenMP variable-tracking helpers used
   before their definitions.  */
static void omp_add_variable (struct gimplify_omp_ctx *, tree, unsigned int);
static bool omp_notice_variable (struct gimplify_omp_ctx *, tree, bool);
/* A subroutine of append_to_statement_list{,_force}.  T is not NULL.  */

static void
append_to_statement_list_1 (tree t, tree *list_p)
{
  tree list = *list_p;
  tree_stmt_iterator i;

  if (list == NULL_TREE)
    {
      /* If T is itself a statement list, it can simply become the list.  */
      if (t && TREE_CODE (t) == STATEMENT_LIST)
	{
	  *list_p = t;
	  return;
	}
      /* Otherwise start a fresh list to link T onto.  */
      list = alloc_stmt_list ();
      *list_p = list;
    }

  i = tsi_last (list);
  tsi_link_after (&i, t, TSI_CONTINUE_LINKING);
}
/* Add T to the end of the list container pointed to by LIST_P.
   If T is an expression with no effects, it is ignored.  */

void
append_to_statement_list (tree t, tree *list_p)
{
  /* Effect-free statements contribute nothing; drop them.  */
  if (t == NULL_TREE || !TREE_SIDE_EFFECTS (t))
    return;
  append_to_statement_list_1 (t, list_p);
}
/* Similar, but the statement is always added, regardless of side effects.  */

void
append_to_statement_list_force (tree t, tree *list_p)
{
  if (t == NULL_TREE)
    return;
  append_to_statement_list_1 (t, list_p);
}
/* Both gimplify the statement T and append it to LIST_P.  */
void
gimplify_and_add (tree t, tree *list_p)
{
  /* gimplify_stmt may rewrite T in place; append the result (which is
     dropped if it has no side effects).  */
  gimplify_stmt (&t);
  append_to_statement_list (t, list_p);
}
/* Strip off a legitimate source ending from the input string NAME of
   length LEN.  Rather than having to know the names used by all of
   our front ends, we strip off an ending of a period followed by
   up to five characters.  (Java uses ".class".)  */

static inline void
remove_suffix (char *name, int len)
{
  int pos;

  /* Scan backwards over at most the last seven characters, never
     touching position 0, and truncate at the first '.' found.  */
  for (pos = len - 2; pos > len - 8 && pos > 0; pos--)
    if (name[pos] == '.')
      {
	name[pos] = '\0';
	break;
      }
}
/* Create a nameless artificial label and put it in the current function
   context.  Returns the newly created label.  */

tree
create_artificial_label (void)
{
  tree label = build_decl (LABEL_DECL, NULL_TREE, void_type_node);

  /* The label is compiler-generated: hide it from users and debug info,
     and attach it to the current function.  */
  DECL_ARTIFICIAL (label) = 1;
  DECL_IGNORED_P (label) = 1;
  DECL_CONTEXT (label) = current_function_decl;
  return label;
}
/* Subroutine for find_single_pointer_decl.  */

static tree
find_single_pointer_decl_1 (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
			    void *data)
{
  tree *pdecl = (tree *) data;

  /* We are only looking for pointers at the same level as the
     original tree; we must not look through any indirections.
     Returning anything other than NULL_TREE will cause the caller to
     not find a base.  */
  if (REFERENCE_CLASS_P (*tp))
    return *tp;

  if (DECL_P (*tp) && POINTER_TYPE_P (TREE_TYPE (*tp)))
    {
      /* A second pointer decl makes the answer ambiguous: return a
	 nonzero value to unwind from walk_tree, signalling a
	 duplicate.  */
      if (*pdecl != NULL_TREE)
	return *tp;

      *pdecl = *tp;
    }

  return NULL_TREE;
}
/* Find the single DECL of pointer type in the tree T, used directly
   rather than via an indirection, and return it.  If there are zero
   or more than one such DECLs, return NULL.  */

static tree
find_single_pointer_decl (tree t)
{
  tree decl = NULL_TREE;

  /* A nonzero walk_tree result means the callback saw an indirection
     or more than one pointer DECL; either way there is no unique
     answer.  */
  if (walk_tree (&t, find_single_pointer_decl_1, &decl, NULL))
    return NULL_TREE;

  return decl;
}
/* Create a new temporary name with PREFIX.  Returns an identifier.  */

static GTY(()) unsigned int tmp_var_id_num;

tree
create_tmp_var_name (const char *prefix)
{
  char *tmp_name;

  if (prefix)
    {
      /* Work on a stack copy so the caller's string is untouched, and
	 drop any source-file style suffix (".c", ".class", ...).  */
      char *preftmp = ASTRDUP (prefix);

      remove_suffix (preftmp, strlen (preftmp));
      prefix = preftmp;
    }

  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix ? prefix : "T", tmp_var_id_num++);
  return get_identifier (tmp_name);
}
/* Create a new temporary variable declaration of type TYPE.
   Does NOT push it into the current binding.  */
tree
create_tmp_var_raw (tree type, const char *prefix)
{
  tree tmp_var;
  tree new_type;
  /* Make the type of the variable writable.  */
  new_type = build_type_variant (type, 0, 0);
  TYPE_ATTRIBUTES (new_type) = TYPE_ATTRIBUTES (type);
  /* NOTE(review): the decl below is built with TYPE, not the NEW_TYPE
     computed above, so the unqualified variant never reaches the decl
     -- confirm whether that is intentional.  */
  tmp_var = build_decl (VAR_DECL, prefix ? create_tmp_var_name (prefix) : NULL,
			type);
  /* The variable was declared by the compiler.  */
  DECL_ARTIFICIAL (tmp_var) = 1;
  /* And we don't want debug info for it.  */
  DECL_IGNORED_P (tmp_var) = 1;
  /* Make the variable writable.  */
  TREE_READONLY (tmp_var) = 0;
  DECL_EXTERNAL (tmp_var) = 0;
  TREE_STATIC (tmp_var) = 0;
  TREE_USED (tmp_var) = 1;
  return tmp_var;
}
/* Create a new temporary variable declaration of type TYPE.  DOES push the
   variable into the current binding.  Further, assume that this is called
   only from gimplification or optimization, at which point the creation of
   certain types are bugs.  */

tree
create_tmp_var (tree type, const char *prefix)
{
  tree var;

  /* We don't allow types that are addressable (meaning we can't make copies),
     or incomplete.  We also used to reject every variable size objects here,
     but now support those for which a constant upper bound can be obtained.
     The processing for variable sizes is performed in gimple_add_tmp_var,
     point at which it really matters and possibly reached via paths not going
     through this function, e.g. after direct calls to create_tmp_var_raw.  */
  gcc_assert (!TREE_ADDRESSABLE (type) && COMPLETE_TYPE_P (type));

  var = create_tmp_var_raw (type, prefix);
  gimple_add_tmp_var (var);
  return var;
}
/* Given a tree, try to return a useful variable name that we can use
   to prefix a temporary that is being assigned the value of the tree.
   I.E. given  <temp> = &A, return A.  */

const char *
get_name (const_tree t)
{
  const_tree stripped_decl = t;

  STRIP_NOPS (stripped_decl);

  /* A named decl gives the answer directly.  */
  if (DECL_P (stripped_decl) && DECL_NAME (stripped_decl))
    return IDENTIFIER_POINTER (DECL_NAME (stripped_decl));

  /* For an ADDR_EXPR, recurse into the object whose address is taken.  */
  if (TREE_CODE (stripped_decl) == ADDR_EXPR)
    return get_name (TREE_OPERAND (stripped_decl, 0));

  return NULL;
}
/* Create a temporary with a name derived from VAL.  Subroutine of
   lookup_tmp_var; nobody else should call this function.  */

static inline tree
create_tmp_from_val (tree val)
{
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (val));

  return create_tmp_var (type, get_name (val));
}
/* Create a temporary to hold the value of VAL.  If IS_FORMAL, try to reuse
   an existing expression temporary by looking VAL up in the context's
   formal-temporary hash table.  */
static tree
lookup_tmp_var (tree val, bool is_formal)
{
  tree ret;
  /* If not optimizing, never really reuse a temporary.  local-alloc
     won't allocate any variable that is used in more than one basic
     block, which means it will go into memory, causing much extra
     work in reload and final and poorer code generation, outweighing
     the extra memory allocation here.  */
  if (!optimize || !is_formal || TREE_SIDE_EFFECTS (val))
    ret = create_tmp_from_val (val);
  else
    {
      elt_t elt, *elt_p;
      void **slot;
      /* Probe the table with a stack key; INSERT reserves a slot if
	 VAL has not been seen before.  */
      elt.val = val;
      slot = htab_find_slot (gimplify_ctxp->temp_htab, (void *)&elt, INSERT);
      if (*slot == NULL)
	{
	  /* First occurrence: make a temporary and remember it.  */
	  elt_p = XNEW (elt_t);
	  elt_p->val = val;
	  elt_p->temp = ret = create_tmp_from_val (val);
	  *slot = (void *) elt_p;
	}
      else
	{
	  /* Reuse the temporary created for the earlier occurrence.  */
	  elt_p = (elt_t *) *slot;
          ret = elt_p->temp;
	}
    }
  /* The flag is cleared again when the context is popped; see
     pop_gimplify_context.  */
  if (is_formal)
    DECL_GIMPLE_FORMAL_TEMP_P (ret) = 1;
  return ret;
}
/* Returns a formal temporary variable initialized with VAL.  PRE_P is as
   in gimplify_expr.  Only use this function if:
   1) The value of the unfactored expression represented by VAL will not
   change between the initialization and use of the temporary, and
   2) The temporary will not be otherwise modified.
   For instance, #1 means that this is inappropriate for SAVE_EXPR temps,
   and #2 means it is inappropriate for && temps.
   For other cases, use get_initialized_tmp_var instead.  */
static tree
internal_get_tmp_var (tree val, tree *pre_p, tree *post_p, bool is_formal)
{
  tree t, mod;
  /* Reduce VAL to a valid formal-temp RHS before looking it up.  */
  gimplify_expr (&val, pre_p, post_p, is_gimple_formal_tmp_rhs, fb_rvalue);
  t = lookup_tmp_var (val, is_formal);
  if (is_formal)
    {
      /* Propagate restrict-pointer information: if VAL is based on a
	 single restrict-qualified pointer decl, record that base on the
	 temporary so aliasing info survives the factoring.  */
      tree u = find_single_pointer_decl (val);
      if (u && TREE_CODE (u) == VAR_DECL && DECL_BASED_ON_RESTRICT_P (u))
	u = DECL_GET_RESTRICT_BASE (u);
      if (u && TYPE_RESTRICT (TREE_TYPE (u)))
	{
	  if (DECL_BASED_ON_RESTRICT_P (t))
	    gcc_assert (u == DECL_GET_RESTRICT_BASE (t));
	  else
	    {
	      DECL_BASED_ON_RESTRICT_P (t) = 1;
	      SET_DECL_RESTRICT_BASE (t, u);
	    }
	}
    }
  /* Complex and vector temporaries may be promoted to gimple registers.  */
  if (TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
      || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
    DECL_GIMPLE_REG_P (t) = 1;
  mod = build2 (INIT_EXPR, TREE_TYPE (t), t, unshare_expr (val));
  /* Keep VAL's location if it has one; fall back to the current one.  */
  if (EXPR_HAS_LOCATION (val))
    SET_EXPR_LOCUS (mod, EXPR_LOCUS (val));
  else
    SET_EXPR_LOCATION (mod, input_location);
  /* gimplify_modify_expr might want to reduce this further.  */
  gimplify_and_add (mod, pre_p);
  /* If we're gimplifying into ssa, gimplify_modify_expr will have
     given our temporary an ssa name.  Find and return it.  */
  if (gimplify_ctxp->into_ssa)
    t = TREE_OPERAND (mod, 0);
  return t;
}
/* Returns a formal temporary variable initialized with VAL.  PRE_P
   points to a statement list where side-effects needed to compute VAL
   should be stored.  Formal temps may be reused for equal expressions;
   see internal_get_tmp_var for the usage restrictions.  */
tree
get_formal_tmp_var (tree val, tree *pre_p)
{
  return internal_get_tmp_var (val, pre_p, NULL, true);
}
/* Returns a temporary variable initialized with VAL.  PRE_P and POST_P
   are as in gimplify_expr.  Unlike get_formal_tmp_var, the temporary is
   never shared with other occurrences of the same expression.  */
tree
get_initialized_tmp_var (tree val, tree *pre_p, tree *post_p)
{
  return internal_get_tmp_var (val, pre_p, post_p, false);
}
/* Declares all the variables in VARS in SCOPE.  If DEBUG_INFO is
   true, generate debug info for them; otherwise don't.  VARS is a
   TREE_CHAIN list; SCOPE must resolve to a BIND_EXPR.  */
void
declare_vars (tree vars, tree scope, bool debug_info)
{
  tree last = vars;
  if (last)
    {
      tree temps, block;
      /* C99 mode puts the default 'return 0;' for main outside the outer
	 braces.  So drill down until we find an actual scope.  */
      while (TREE_CODE (scope) == COMPOUND_EXPR)
	scope = TREE_OPERAND (scope, 0);
      gcc_assert (TREE_CODE (scope) == BIND_EXPR);
      /* NB: nreverse destroys the chain, so LAST is now the tail of
	 TEMPS -- that is what makes the TREE_CHAIN splice below work.  */
      temps = nreverse (last);
      block = BIND_EXPR_BLOCK (scope);
      if (!block || !debug_info)
	{
	  /* Prepend TEMPS onto the scope's variable chain.  */
	  TREE_CHAIN (last) = BIND_EXPR_VARS (scope);
	  BIND_EXPR_VARS (scope) = temps;
	}
      else
	{
	  /* We need to attach the nodes both to the BIND_EXPR and to its
	     associated BLOCK for debugging purposes.  The key point here
	     is that the BLOCK_VARS of the BIND_EXPR_BLOCK of a BIND_EXPR
	     is a subchain of the BIND_EXPR_VARS of the BIND_EXPR.  */
	  if (BLOCK_VARS (block))
	    BLOCK_VARS (block) = chainon (BLOCK_VARS (block), temps);
	  else
	    {
	      BIND_EXPR_VARS (scope) = chainon (BIND_EXPR_VARS (scope), temps);
	      BLOCK_VARS (block) = temps;
	    }
	}
    }
}
/* For VAR a VAR_DECL of variable size, try to find a constant upper bound
   for the size and adjust DECL_SIZE/DECL_SIZE_UNIT accordingly.  Abort if
   no such upper bound can be obtained.  */
static void
force_constant_size (tree var)
{
  /* The only attempt we make is by querying the maximum size of objects
     of the variable's type.  */
  HOST_WIDE_INT max_size;
  gcc_assert (TREE_CODE (var) == VAR_DECL);
  /* max_int_size_in_bytes returning a negative value means no constant
     bound exists, which we cannot handle.  */
  max_size = max_int_size_in_bytes (TREE_TYPE (var));
  gcc_assert (max_size >= 0);
  /* Rewrite both the byte-size and bit-size to the constant bound.  */
  DECL_SIZE_UNIT (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE_UNIT (var)), max_size);
  DECL_SIZE (var)
    = build_int_cst (TREE_TYPE (DECL_SIZE (var)), max_size * BITS_PER_UNIT);
}
/* Register the temporary TMP with the current function: set its context,
   chain it onto the gimplifier's temporaries (or the function's variable
   list when no gimplification context is active), and note it as local
   within any enclosing OpenMP parallel.  TMP must not already be chained
   or seen in a bind expr.  */
void
gimple_add_tmp_var (tree tmp)
{
  gcc_assert (!TREE_CHAIN (tmp) && !DECL_SEEN_IN_BIND_EXPR_P (tmp));
  /* Later processing assumes that the object size is constant, which might
     not be true at this point.  Force the use of a constant upper bound in
     this case.  */
  if (!host_integerp (DECL_SIZE_UNIT (tmp), 1))
    force_constant_size (tmp);
  DECL_CONTEXT (tmp) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (tmp) = 1;
  if (gimplify_ctxp)
    {
      /* Chain onto the current gimplification context's temporaries;
	 these are declared when the context is popped.  */
      TREE_CHAIN (tmp) = gimplify_ctxp->temps;
      gimplify_ctxp->temps = tmp;
      /* Mark temporaries local within the nearest enclosing parallel.  */
      if (gimplify_omp_ctxp)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  while (ctx && !ctx->is_parallel)
	    ctx = ctx->outer_context;
	  if (ctx)
	    omp_add_variable (ctx, tmp, GOVD_LOCAL | GOVD_SEEN);
	}
    }
  else if (cfun)
    record_vars (tmp);
  else
    /* No function being compiled either: attach directly to the saved
       tree of the current function decl.  */
    declare_vars (tmp, DECL_SAVED_TREE (current_function_decl), false);
}
/* Determines whether to assign a locus to the statement STMT.  */

static bool
should_carry_locus_p (const_tree stmt)
{
  /* Don't emit a line note for a label.  We particularly don't want to
     emit one for the break label, since it doesn't actually correspond
     to the beginning of the loop/switch.  */
  if (TREE_CODE (stmt) == LABEL_EXPR)
    return false;

  /* Do not annotate empty statements, since it confuses gcov.  */
  return TREE_SIDE_EFFECTS (stmt) != 0;
}
/* Assign LOCUS to statement T, unless T cannot carry a location,
   already has one, or should not be annotated at all.  */

static void
annotate_one_with_locus (tree t, location_t locus)
{
  if (!CAN_HAVE_LOCATION_P (t))
    return;
  if (EXPR_HAS_LOCATION (t) || !should_carry_locus_p (t))
    return;

  SET_EXPR_LOCATION (t, locus);
}
/* Assign LOCUS to every statement in the list *STMT_P that should
   carry one and does not yet.  */

void
annotate_all_with_locus (tree *stmt_p, location_t locus)
{
  tree_stmt_iterator tsi;

  if (*stmt_p == NULL_TREE)
    return;

  for (tsi = tsi_start (*stmt_p); !tsi_end_p (tsi); tsi_next (&tsi))
    {
      tree stmt = tsi_stmt (tsi);

      /* Assuming we've already been gimplified, we shouldn't
	 see nested chaining constructs anymore.  */
      gcc_assert (TREE_CODE (stmt) != STATEMENT_LIST
		  && TREE_CODE (stmt) != COMPOUND_EXPR);

      annotate_one_with_locus (stmt, locus);
    }
}
/* Similar to copy_tree_r() but do not copy SAVE_EXPR or TARGET_EXPR nodes.
   These nodes model computations that should only be done once.  If we
   were to unshare something like SAVE_EXPR(i++), the gimplification
   process would create wrong code.  */

static tree
mostly_copy_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Don't unshare types, decls, constants and SAVE_EXPR nodes.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant
      || code == SAVE_EXPR || code == TARGET_EXPR
      /* We can't do anything sensible with a BLOCK used as an expression,
	 but we also can't just die when we see it because of non-expression
	 uses.  So just avert our eyes and cross our fingers.  Silly Java.  */
      || code == BLOCK)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  gcc_assert (code != BIND_EXPR);
  copy_tree_r (tp, walk_subtrees, data);
  return NULL_TREE;
}
/* Callback for walk_tree to unshare most of the shared trees rooted at
   *TP.  If *TP has been visited already (i.e., TREE_VISITED (*TP) == 1),
   then *TP is deep copied by calling copy_tree_r.
   This unshares the same trees as copy_tree_r with the exception of
   SAVE_EXPR nodes.  These nodes model computations that should only be
   done once.  If we were to unshare something like SAVE_EXPR(i++), the
   gimplification process would create wrong code.  */
static tree
copy_if_shared_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);
  /* Skip types, decls, and constants.  But we do want to look at their
     types and the bounds of types.  Mark them as visited so we properly
     unmark their subtrees on the unmark pass.  If we've already seen them,
     don't look down further.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      if (TREE_VISITED (t))
	*walk_subtrees = 0;
      else
	TREE_VISITED (t) = 1;
    }
  /* If this node has been visited already, unshare it and don't look
     any deeper.  */
  else if (TREE_VISITED (t))
    {
      /* mostly_copy_tree_r copies the whole subtree, so descending
	 further here would only duplicate work.  */
      walk_tree (tp, mostly_copy_tree_r, NULL, NULL);
      *walk_subtrees = 0;
    }
  /* Otherwise, mark the tree as visited and keep looking.  */
  else
    TREE_VISITED (t) = 1;
  return NULL_TREE;
}
/* Callback for walk_tree to clear the TREE_VISITED marks left behind by
   copy_if_shared_r.  An unvisited node means its subtree was never
   marked, so there is no need to descend into it.  */

static tree
unmark_visited_r (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
		  void *data ATTRIBUTE_UNUSED)
{
  if (!TREE_VISITED (*tp))
    *walk_subtrees = 0;
  else
    TREE_VISITED (*tp) = 0;

  return NULL_TREE;
}
/* Unshare all the trees in BODY_P, a pointer into the body of FNDECL, and the
   bodies of any nested functions if we are unsharing the entire body of
   FNDECL.  */
static void
unshare_body (tree *body_p, tree fndecl)
{
  /* NOTE(review): cgraph_node is called unconditionally, before the
     recursion test below -- presumably its side effects are wanted even
     when BODY_P is not the whole body; confirm before reordering.  */
  struct cgraph_node *cgn = cgraph_node (fndecl);
  walk_tree (body_p, copy_if_shared_r, NULL, NULL);
  /* Only recurse into nested functions when unsharing the whole body.  */
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unshare_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}
/* Likewise, but mark all trees as not visited: clears the TREE_VISITED
   bits set by a prior unshare_body pass, recursing into nested function
   bodies the same way.  */
static void
unvisit_body (tree *body_p, tree fndecl)
{
  struct cgraph_node *cgn = cgraph_node (fndecl);
  walk_tree (body_p, unmark_visited_r, NULL, NULL);
  /* Only recurse into nested functions when processing the whole body.  */
  if (body_p == &DECL_SAVED_TREE (fndecl))
    for (cgn = cgn->nested; cgn; cgn = cgn->next_nested)
      unvisit_body (&DECL_SAVED_TREE (cgn->decl), cgn->decl);
}
/* Unshare T and all the trees reached from T via TREE_CHAIN.  */
static void
unshare_all_trees (tree t)
{
  /* First pass marks nodes and copies anything seen twice; second pass
     clears the visited bits the first one left behind.  */
  walk_tree (&t, copy_if_shared_r, NULL, NULL);
  walk_tree (&t, unmark_visited_r, NULL, NULL);
}
/* Unconditionally make an unshared copy of EXPR.  This is used when using
   stored expressions which span multiple functions, such as BINFO_VTABLE,
   as the normal unsharing process can't tell that they're shared.  */
tree
unshare_expr (tree expr)
{
  /* mostly_copy_tree_r copies everything except types, decls, constants
     and once-only nodes (SAVE_EXPR/TARGET_EXPR).  */
  walk_tree (&expr, mostly_copy_tree_r, NULL, NULL);
  return expr;
}
/* A terser interface for building a representation of an exception
   specification.  */

tree
gimple_build_eh_filter (tree body, tree allowed, tree failure)
{
  tree filter, result;

  /* FIXME should the allowed types go in TREE_TYPE?  */
  filter = build2 (EH_FILTER_EXPR, void_type_node, allowed, NULL_TREE);
  append_to_statement_list (failure, &EH_FILTER_FAILURE (filter));

  /* Wrap BODY in a TRY_CATCH whose handler is the filter.  */
  result = build2 (TRY_CATCH_EXPR, void_type_node, NULL_TREE, filter);
  append_to_statement_list (body, &TREE_OPERAND (result, 0));

  return result;
}
/* WRAPPER is a code such as BIND_EXPR or CLEANUP_POINT_EXPR which can both
   contain statements and have a value.  Assign its value to a temporary
   and give it void_type_node.  Returns the temporary, or NULL_TREE if
   WRAPPER was already void.  */
tree
voidify_wrapper_expr (tree wrapper, tree temp)
{
  tree type = TREE_TYPE (wrapper);
  if (type && !VOID_TYPE_P (type))
    {
      tree *p;
      /* Set p to point to the body of the wrapper.  Loop until we find
	 something that isn't a wrapper.  Every container passed through
	 on the way down is voidified and marked as having side effects.  */
      for (p = &wrapper; p && *p; )
	{
	  switch (TREE_CODE (*p))
	    {
	    case BIND_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      /* For a BIND_EXPR, the body is operand 1.  */
	      p = &BIND_EXPR_BODY (*p);
	      break;
	    case CLEANUP_POINT_EXPR:
	    case TRY_FINALLY_EXPR:
	    case TRY_CATCH_EXPR:
	      TREE_SIDE_EFFECTS (*p) = 1;
	      TREE_TYPE (*p) = void_type_node;
	      p = &TREE_OPERAND (*p, 0);
	      break;
	    case STATEMENT_LIST:
	      {
		/* The value of a statement list is its last statement;
		   an empty list means there is no value to capture.  */
		tree_stmt_iterator i = tsi_last (*p);
		TREE_SIDE_EFFECTS (*p) = 1;
		TREE_TYPE (*p) = void_type_node;
		p = tsi_end_p (i) ? NULL : tsi_stmt_ptr (i);
	      }
	      break;
	    case COMPOUND_EXPR:
	      /* Advance to the last statement.  Set all container types to void.  */
	      for (; TREE_CODE (*p) == COMPOUND_EXPR; p = &TREE_OPERAND (*p, 1))
		{
		  TREE_SIDE_EFFECTS (*p) = 1;
		  TREE_TYPE (*p) = void_type_node;
		}
	      break;
	    default:
	      goto out;
	    }
	}
    out:
      if (p == NULL || IS_EMPTY_STMT (*p))
	temp = NULL_TREE;
      else if (temp)
	{
	  /* The wrapper is on the RHS of an assignment that we're pushing
	     down.  */
	  gcc_assert (TREE_CODE (temp) == INIT_EXPR
		      || TREE_CODE (temp) == GIMPLE_MODIFY_STMT
		      || TREE_CODE (temp) == MODIFY_EXPR);
	  GENERIC_TREE_OPERAND (temp, 1) = *p;
	  *p = temp;
	}
      else
	{
	  /* No assignment supplied: capture the value in a fresh
	     temporary at the innermost value position.  */
	  temp = create_tmp_var (type, "retval");
	  *p = build2 (INIT_EXPR, type, temp, *p);
	}
      return temp;
    }
  return NULL_TREE;
}
/* Prepare calls to builtins to SAVE and RESTORE the stack as well as
   a temporary through which they communicate.  */

static void
build_stack_save_restore (tree *save, tree *restore)
{
  tree tmp_var, save_call;

  /* The save builtin yields the current stack pointer; stash it in a
     temporary so the matching restore call can consume it.  */
  save_call
    = build_call_expr (implicit_built_in_decls[BUILT_IN_STACK_SAVE], 0);
  tmp_var = create_tmp_var (ptr_type_node, "saved_stack");

  *save = build_gimple_modify_stmt (tmp_var, save_call);
  *restore
    = build_call_expr (implicit_built_in_decls[BUILT_IN_STACK_RESTORE],
		       1, tmp_var);
}
/* Gimplify a BIND_EXPR.  Just voidify and recurse.  */
static enum gimplify_status
gimplify_bind_expr (tree *expr_p, tree *pre_p)
{
  tree bind_expr = *expr_p;
  bool old_save_stack = gimplify_ctxp->save_stack;
  tree t;
  /* If the bind has a value, redirect it into a temporary and make the
     bind itself void; TEMP is then what replaces the expression.  */
  tree temp = voidify_wrapper_expr (bind_expr, NULL);
  /* Mark variables seen in this bind expr.  */
  for (t = BIND_EXPR_VARS (bind_expr); t ; t = TREE_CHAIN (t))
    {
      if (TREE_CODE (t) == VAR_DECL)
	{
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
	  /* Mark variable as local.  */
	  if (ctx && !is_global_var (t)
	      && (! DECL_SEEN_IN_BIND_EXPR_P (t)
		  || splay_tree_lookup (ctx->variables,
					(splay_tree_key) t) == NULL))
	    omp_add_variable (gimplify_omp_ctxp, t, GOVD_LOCAL | GOVD_SEEN);
	  DECL_SEEN_IN_BIND_EXPR_P (t) = 1;
	}
      /* Preliminarily mark non-addressed complex variables as eligible
	 for promotion to gimple registers.  We'll transform their uses
	 as we find them.  */
      if ((TREE_CODE (TREE_TYPE (t)) == COMPLEX_TYPE
	   || TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
	  && !TREE_THIS_VOLATILE (t)
	  && (TREE_CODE (t) == VAR_DECL && !DECL_HARD_REGISTER (t))
	  && !needs_to_live_in_memory (t))
	DECL_GIMPLE_REG_P (t) = 1;
    }
  gimple_push_bind_expr (bind_expr);
  /* SAVE_STACK is reset so we can detect whether the body itself needs
     a stack save (e.g. contains a VLA); the caller's flag is restored
     below.  */
  gimplify_ctxp->save_stack = false;
  gimplify_to_stmt_list (&BIND_EXPR_BODY (bind_expr));
  if (gimplify_ctxp->save_stack)
    {
      tree stack_save, stack_restore;
      /* Save stack on entry and restore it on exit.  Add a try_finally
	 block to achieve this.  Note that mudflap depends on the
	 format of the emitted code: see mx_register_decls().  */
      build_stack_save_restore (&stack_save, &stack_restore);
      t = build2 (TRY_FINALLY_EXPR, void_type_node,
		  BIND_EXPR_BODY (bind_expr), NULL_TREE);
      append_to_statement_list (stack_restore, &TREE_OPERAND (t, 1));
      BIND_EXPR_BODY (bind_expr) = NULL_TREE;
      append_to_statement_list (stack_save, &BIND_EXPR_BODY (bind_expr));
      append_to_statement_list (t, &BIND_EXPR_BODY (bind_expr));
    }
  gimplify_ctxp->save_stack = old_save_stack;
  gimple_pop_bind_expr ();
  if (temp)
    {
      /* The bind had a value: emit the bind as a statement and let the
	 temporary stand for the expression's value.  */
      *expr_p = temp;
      append_to_statement_list (bind_expr, pre_p);
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}
/* Gimplify a RETURN_EXPR. If the expression to be returned is not a
GIMPLE value, it is assigned to a new temporary and the statement is
re-written to return the temporary.
PRE_P points to the list where side effects that must happen before
STMT should be stored. */
static enum gimplify_status
gimplify_return_expr (tree stmt, tree *pre_p)
{
  tree ret_expr = TREE_OPERAND (stmt, 0);
  tree result_decl, result;
  /* Nothing to do for a plain "return;", a return of the bare
     RESULT_DECL (already in the desired form), or an erroneous
     return value.  */
  if (!ret_expr || TREE_CODE (ret_expr) == RESULT_DECL
      || ret_expr == error_mark_node)
    return GS_ALL_DONE;
  if (VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))))
    result_decl = NULL_TREE;
  else
    {
      /* RET_EXPR is an assignment of the return value into the
	 RESULT_DECL; dig out its left-hand side.  */
      result_decl = GENERIC_TREE_OPERAND (ret_expr, 0);
      if (TREE_CODE (result_decl) == INDIRECT_REF)
	/* See through a return by reference.  */
	result_decl = TREE_OPERAND (result_decl, 0);
      gcc_assert ((TREE_CODE (ret_expr) == MODIFY_EXPR
		   || TREE_CODE (ret_expr) == GIMPLE_MODIFY_STMT
		   || TREE_CODE (ret_expr) == INIT_EXPR)
		  && TREE_CODE (result_decl) == RESULT_DECL);
    }
  /* If aggregate_value_p is true, then we can return the bare RESULT_DECL.
     Recall that aggregate_value_p is FALSE for any aggregate type that is
     returned in registers.  If we're returning values in registers, then
     we don't want to extend the lifetime of the RESULT_DECL, particularly
     across another call.  In addition, for those aggregates for which
     hard_function_value generates a PARALLEL, we'll die during normal
     expansion of structure assignments; there's special code in expand_return
     to handle this case that does not exist in expand_expr.  */
  if (!result_decl
      || aggregate_value_p (result_decl, TREE_TYPE (current_function_decl)))
    result = result_decl;
  else if (gimplify_ctxp->return_temp)
    /* All returns in the function share one temporary.  */
    result = gimplify_ctxp->return_temp;
  else
    {
      result = create_tmp_var (TREE_TYPE (result_decl), NULL);
      if (TREE_CODE (TREE_TYPE (result)) == COMPLEX_TYPE
	  || TREE_CODE (TREE_TYPE (result)) == VECTOR_TYPE)
	DECL_GIMPLE_REG_P (result) = 1;
      /* ??? With complex control flow (usually involving abnormal edges),
	 we can wind up warning about an uninitialized value for this.  Due
	 to how this variable is constructed and initialized, this is never
	 true.  Give up and never warn.  */
      TREE_NO_WARNING (result) = 1;
      gimplify_ctxp->return_temp = result;
    }
  /* Smash the lhs of the GIMPLE_MODIFY_STMT to the temporary we plan to use.
     Then gimplify the whole thing.  */
  if (result != result_decl)
    GENERIC_TREE_OPERAND (ret_expr, 0) = result;
  gimplify_and_add (TREE_OPERAND (stmt, 0), pre_p);
  /* If we didn't use a temporary, then the result is just the result_decl.
     Otherwise we need a simple copy.  This should already be gimple.  */
  if (result == result_decl)
    ret_expr = result;
  else
    ret_expr = build_gimple_modify_stmt (result_decl, result);
  TREE_OPERAND (stmt, 0) = ret_expr;
  return GS_ALL_DONE;
}
static void
gimplify_vla_decl (tree decl, tree *stmt_p)
{
  /* DECL has variable size: gimplify its size expressions and rewrite
     all uses of the decl as an indirection through a pointer temporary
     that is filled in by a call to alloca.  Mudflap depends on the
     exact shape of the emitted code: see mx_register_decls().  */
  tree ptr_type, addr, deref, alloca_call;
  gimplify_one_sizepos (&DECL_SIZE (decl), stmt_p);
  gimplify_one_sizepos (&DECL_SIZE_UNIT (decl), stmt_p);
  /* Point DECL_VALUE_EXPR at *ADDR.  This does two things: it tells
     the rest of the gimplifier what replacement to use for DECL, and
     it tells the debug info where the value lives.  */
  ptr_type = build_pointer_type (TREE_TYPE (decl));
  addr = create_tmp_var (ptr_type, get_name (decl));
  DECL_IGNORED_P (addr) = 0;
  deref = build_fold_indirect_ref (addr);
  SET_DECL_VALUE_EXPR (decl, deref);
  DECL_HAS_VALUE_EXPR_P (decl) = 1;
  /* Emit: addr = (ptr_type) __builtin_alloca (size_in_bytes);  */
  alloca_call = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1,
				 DECL_SIZE_UNIT (decl));
  alloca_call = fold_convert (ptr_type, alloca_call);
  gimplify_and_add (build_gimple_modify_stmt (addr, alloca_call), stmt_p);
  /* The enclosing BIND_EXPR must save the stack level on entry and
     restore it on exit so the alloca'd storage is reclaimed.  */
  gimplify_ctxp->save_stack = true;
}
/* Gimplifies a DECL_EXPR node *STMT_P by making any necessary allocation
and initialization explicit. */
static enum gimplify_status
gimplify_decl_expr (tree *stmt_p)
{
  /* Lower the DECL_EXPR at *STMT_P: gimplify the declared entity's
     type sizes and, for automatic variables, make any allocation
     (VLAs) and initialization explicit statements in *STMT_P.  */
  tree decl = DECL_EXPR_DECL (*stmt_p);
  *stmt_p = NULL_TREE;
  if (TREE_TYPE (decl) == error_mark_node)
    return GS_ERROR;
  if ((TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == VAR_DECL)
      && !TYPE_SIZES_GIMPLIFIED (TREE_TYPE (decl)))
    gimplify_type_sizes (TREE_TYPE (decl), stmt_p);
  if (TREE_CODE (decl) == VAR_DECL && !DECL_EXTERNAL (decl))
    {
      tree initializer = DECL_INITIAL (decl);
      /* A non-constant size means this is a VLA: emit the explicit
	 allocation for it.  */
      if (TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	gimplify_vla_decl (decl, stmt_p);
      if (initializer && initializer != error_mark_node)
	{
	  if (TREE_STATIC (decl))
	    /* Static initializers never become statements, but they
	       may contain label addresses that must be preserved.  */
	    walk_tree (&initializer, force_labels_r, NULL, NULL);
	  else
	    {
	      /* Turn the initializer into an explicit assignment.  */
	      DECL_INITIAL (decl) = NULL_TREE;
	      gimplify_and_add (build2 (INIT_EXPR, void_type_node,
					decl, initializer),
				stmt_p);
	    }
	}
      /* Some front ends do not explicitly declare all anonymous
	 artificial variables.  We compensate here by declaring the
	 variables, though it would be better if the front ends would
	 explicitly declare them.  */
      if (!DECL_SEEN_IN_BIND_EXPR_P (decl)
	  && DECL_ARTIFICIAL (decl)
	  && DECL_NAME (decl) == NULL_TREE)
	gimple_add_tmp_var (decl);
    }
  return GS_ALL_DONE;
}
/* Gimplify a LOOP_EXPR. Normally this just involves gimplifying the body
and replacing the LOOP_EXPR with goto, but if the loop contains an
EXIT_EXPR, we need to append a label for it to jump to. */
static enum gimplify_status
gimplify_loop_expr (tree *expr_p, tree *pre_p)
{
  /* Lower LOOP_EXPR into "top: <body>; goto top;".  If the body
     contained an EXIT_EXPR, gimplify_exit_expr will have created an
     exit label in gimplify_ctxp->exit_label; emit that label after
     the back-edge goto so exits have somewhere to land.  */
  tree outer_exit = gimplify_ctxp->exit_label;
  tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
  tree back_jump = build_and_jump (&LABEL_EXPR_LABEL (top));
  append_to_statement_list (top, pre_p);
  gimplify_ctxp->exit_label = NULL_TREE;
  gimplify_and_add (LOOP_EXPR_BODY (*expr_p), pre_p);
  if (gimplify_ctxp->exit_label == NULL_TREE)
    *expr_p = back_jump;
  else
    {
      append_to_statement_list (back_jump, pre_p);
      *expr_p = build1 (LABEL_EXPR, void_type_node,
			gimplify_ctxp->exit_label);
    }
  /* Restore any exit label belonging to an enclosing loop.  */
  gimplify_ctxp->exit_label = outer_exit;
  return GS_ALL_DONE;
}
/* Compare two case labels. Because the front end should already have
made sure that case ranges do not overlap, it is enough to only compare
the CASE_LOW values of each case label. */
static int
compare_case_labels (const void *p1, const void *p2)
{
const_tree const case1 = *(const_tree const*)p1;
const_tree const case2 = *(const_tree const*)p2;
return tree_int_cst_compare (CASE_LOW (case1), CASE_LOW (case2));
}
/* Sort the case labels in LABEL_VEC in place in ascending order. */
void
sort_case_labels (tree label_vec)
{
  /* Sort LABEL_VEC in place, ascending by CASE_LOW, while keeping the
     default case (the one with no CASE_LOW) in the final slot.  */
  size_t len = TREE_VEC_LENGTH (label_vec);
  tree last = TREE_VEC_ELT (label_vec, len - 1);
  if (CASE_LOW (last))
    {
      /* The default label is not in the last slot where it belongs;
	 locate it and swap it into place before sorting the rest.  */
      size_t i;
      for (i = 0; i < len; ++i)
	if (!CASE_LOW (TREE_VEC_ELT (label_vec, i)))
	  {
	    TREE_VEC_ELT (label_vec, len - 1) = TREE_VEC_ELT (label_vec, i);
	    TREE_VEC_ELT (label_vec, i) = last;
	    break;
	  }
    }
  /* Sort everything except the trailing default slot.  */
  qsort (&TREE_VEC_ELT (label_vec, 0), len - 1, sizeof (tree),
	 compare_case_labels);
}
/* Gimplify a SWITCH_EXPR, and collect a TREE_VEC of the labels it can
branch to. */
static enum gimplify_status
gimplify_switch_expr (tree *expr_p, tree *pre_p)
{
  tree switch_expr = *expr_p;
  enum gimplify_status ret;
  /* The controlling expression is evaluated once, before the body.  */
  ret = gimplify_expr (&SWITCH_COND (switch_expr), pre_p, NULL,
                       is_gimple_val, fb_rvalue);
  if (SWITCH_BODY (switch_expr))
    {
      VEC(tree,heap) *labels, *saved_labels;
      tree label_vec, default_case = NULL_TREE;
      size_t i, len;
      /* If someone can be bothered to fill in the labels, they can
	 be bothered to null out the body too.  */
      gcc_assert (!SWITCH_LABELS (switch_expr));
      /* Collect this switch's CASE_LABEL_EXPRs in a fresh vector while
	 gimplifying the body; switches nest, so save and restore the
	 outer switch's vector around it.  */
      saved_labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = VEC_alloc (tree, heap, 8);
      gimplify_to_stmt_list (&SWITCH_BODY (switch_expr));
      labels = gimplify_ctxp->case_labels;
      gimplify_ctxp->case_labels = saved_labels;
      /* Weed out empty ranges and pull the default case (if any) out
	 of the vector; what remains are the real dispatch targets.  */
      i = 0;
      while (i < VEC_length (tree, labels))
	{
	  tree elt = VEC_index (tree, labels, i);
	  tree low = CASE_LOW (elt);
	  bool remove_element = FALSE;
	  if (low)
	    {
	      /* Discard empty ranges.  */
	      tree high = CASE_HIGH (elt);
	      if (high && tree_int_cst_lt (high, low))
		remove_element = TRUE;
	    }
	  else
	    {
	      /* The default case must be the last label in the list.  */
	      gcc_assert (!default_case);
	      default_case = elt;
	      remove_element = TRUE;
	    }
	  if (remove_element)
	    VEC_ordered_remove (tree, labels, i);
	  else
	    i++;
	}
      len = i;
      /* One extra slot for the default case, which always goes last.  */
      label_vec = make_tree_vec (len + 1);
      SWITCH_LABELS (*expr_p) = label_vec;
      append_to_statement_list (switch_expr, pre_p);
      if (! default_case)
	{
	  /* If the switch has no default label, add one, so that we jump
	     around the switch body.  */
	  default_case = build3 (CASE_LABEL_EXPR, void_type_node, NULL_TREE,
				 NULL_TREE, create_artificial_label ());
	  append_to_statement_list (SWITCH_BODY (switch_expr), pre_p);
	  *expr_p = build1 (LABEL_EXPR, void_type_node,
			    CASE_LABEL (default_case));
	}
      else
	*expr_p = SWITCH_BODY (switch_expr);
      for (i = 0; i < len; ++i)
	TREE_VEC_ELT (label_vec, i) = VEC_index (tree, labels, i);
      TREE_VEC_ELT (label_vec, len) = default_case;
      VEC_free (tree, heap, labels);
      sort_case_labels (label_vec);
      /* The body has been flattened into PRE_P; the SWITCH_EXPR keeps
	 only the condition and the label vector.  */
      SWITCH_BODY (switch_expr) = NULL;
    }
  else
    gcc_assert (SWITCH_LABELS (switch_expr));
  return ret;
}
static enum gimplify_status
gimplify_case_label_expr (tree *expr_p)
{
  /* Record the CASE_LABEL_EXPR at *EXPR_P with the innermost gimplify
     context that is collecting case labels, and replace it in the
     statement stream with a plain LABEL_EXPR for its label.

     Invalid OpenMP programs can play Duff's Device type games with
     #pragma omp parallel.  At least in the C front end, we don't
     detect such invalid branches until after gimplification, so the
     innermost context need not be the one collecting labels: walk
     outward until we find the collector.  */
  tree case_expr = *expr_p;
  struct gimplify_ctx *owner = gimplify_ctxp;
  while (owner->case_labels == NULL)
    owner = owner->prev_context;
  VEC_safe_push (tree, heap, owner->case_labels, case_expr);
  *expr_p = build1 (LABEL_EXPR, void_type_node, CASE_LABEL (case_expr));
  return GS_ALL_DONE;
}
/* Build a GOTO to the LABEL_DECL pointed to by LABEL_P, building it first
if necessary. */
tree
build_and_jump (tree *label_p)
{
  /* Build a GOTO_EXPR to the LABEL_DECL pointed to by LABEL_P,
     creating the label first if *LABEL_P is still null.  A null
     LABEL_P means there is nowhere to jump: fall through.  */
  if (label_p == NULL)
    return NULL_TREE;
  if (*label_p == NULL_TREE)
    *label_p = create_artificial_label ();
  return build1 (GOTO_EXPR, void_type_node, *label_p);
}
/* Gimplify an EXIT_EXPR by converting to a GOTO_EXPR inside a COND_EXPR.
This also involves building a label to jump to and communicating it to
gimplify_loop_expr through gimplify_ctxp->exit_label. */
static enum gimplify_status
gimplify_exit_expr (tree *expr_p)
{
  /* Lower EXIT_EXPR(cond) into "if (cond) goto <exit_label>;".  The
     label is created on demand in gimplify_ctxp->exit_label so the
     enclosing gimplify_loop_expr can emit it after the loop.  */
  tree pred = TREE_OPERAND (*expr_p, 0);
  tree jump = build_and_jump (&gimplify_ctxp->exit_label);
  *expr_p = build3 (COND_EXPR, void_type_node, pred, jump, NULL_TREE);
  return GS_OK;
}
/* A helper function to be called via walk_tree. Mark all labels under *TP
as being forced. To be called for DECL_INITIAL of static variables. */
tree
force_labels_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* walk_tree callback for DECL_INITIAL of static variables: any label
     whose address appears there must survive optimization, so mark it
     forced.  Types contain no labels, so don't descend into them.  */
  tree node = *tp;
  if (TYPE_P (node))
    *walk_subtrees = 0;
  if (TREE_CODE (node) == LABEL_DECL)
    FORCED_LABEL (node) = 1;
  return NULL_TREE;
}
/* *EXPR_P is a COMPONENT_REF being used as an rvalue. If its type is
different from its canonical type, wrap the whole thing inside a
NOP_EXPR and force the type of the COMPONENT_REF to be the canonical
type.
The canonical type of a COMPONENT_REF is the type of the field being
referenced--unless the field is a bit-field which can be read directly
in a smaller mode, in which case the canonical type is the
sign-appropriate type corresponding to that mode. */
static void
canonicalize_component_ref (tree *expr_p)
{
  tree expr = *expr_p;
  tree type;
  gcc_assert (TREE_CODE (expr) == COMPONENT_REF);
  /* For integral fields (notably bit-fields readable in a narrower
     mode) the canonical type is whatever get_unwidened settles on;
     otherwise it is simply the declared type of the field.  */
  if (INTEGRAL_TYPE_P (TREE_TYPE (expr)))
    type = TREE_TYPE (get_unwidened (expr, NULL_TREE));
  else
    type = TREE_TYPE (TREE_OPERAND (expr, 1));
  /* One could argue that all the stuff below is not necessary for
     the non-bitfield case and declare it a FE error if type
     adjustment would be needed.  */
  if (TREE_TYPE (expr) != type)
    {
#ifdef ENABLE_TYPES_CHECKING
      tree old_type = TREE_TYPE (expr);
#endif
      int type_quals;
      /* We need to preserve qualifiers and propagate them from
	 operand 0.  */
      type_quals = TYPE_QUALS (type)
	| TYPE_QUALS (TREE_TYPE (TREE_OPERAND (expr, 0)));
      if (TYPE_QUALS (type) != type_quals)
	type = build_qualified_type (TYPE_MAIN_VARIANT (type), type_quals);
      /* Set the type of the COMPONENT_REF to the underlying type.  */
      TREE_TYPE (expr) = type;
#ifdef ENABLE_TYPES_CHECKING
      /* It is now a FE error, if the conversion from the canonical
	 type to the original expression type is not useless.  */
      gcc_assert (useless_type_conversion_p (old_type, type));
#endif
    }
}
/* If a NOP conversion is changing a pointer to array of foo to a pointer
to foo, embed that change in the ADDR_EXPR by converting
T array[U];
(T *)&array
==>
&array[L]
where L is the lower bound. For simplicity, only do this for constant
lower bound.
The constraint is that the type of &array[L] is trivially convertible
to T *. */
static void
canonicalize_addr_expr (tree *expr_p)
{
  tree expr = *expr_p;
  tree addr_expr = TREE_OPERAND (expr, 0);
  tree datype, ddatype, pddatype;
  /* We simplify only conversions from an ADDR_EXPR to a pointer type.  */
  if (!POINTER_TYPE_P (TREE_TYPE (expr))
      || TREE_CODE (addr_expr) != ADDR_EXPR)
    return;
  /* The addr_expr type should be a pointer to an array.  */
  datype = TREE_TYPE (TREE_TYPE (addr_expr));
  if (TREE_CODE (datype) != ARRAY_TYPE)
    return;
  /* The pointer to element type shall be trivially convertible to
     the expression pointer type.  FIX: the previous code compared
     PDDATYPE against DDATYPE -- a pointer type against its own
     pointed-to element type -- which is never a useless conversion,
     so this routine always bailed out and the folding was dead.
     Compare the conversion's result type against PDDATYPE, as the
     comment above actually requires.  */
  ddatype = TREE_TYPE (datype);
  pddatype = build_pointer_type (ddatype);
  if (!useless_type_conversion_p (TREE_TYPE (expr), pddatype))
    return;
  /* The lower bound and element sizes must be constant.  */
  if (!TYPE_SIZE_UNIT (ddatype)
      || TREE_CODE (TYPE_SIZE_UNIT (ddatype)) != INTEGER_CST
      || !TYPE_DOMAIN (datype) || !TYPE_MIN_VALUE (TYPE_DOMAIN (datype))
      || TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (datype))) != INTEGER_CST)
    return;
  /* All checks succeeded.  Build a new node to merge the cast:
     (T *) &array  ==>  &array[low_bound].  */
  *expr_p = build4 (ARRAY_REF, ddatype, TREE_OPERAND (addr_expr, 0),
		    TYPE_MIN_VALUE (TYPE_DOMAIN (datype)),
		    NULL_TREE, NULL_TREE);
  *expr_p = build1 (ADDR_EXPR, pddatype, *expr_p);
}
/* *EXPR_P is a NOP_EXPR or CONVERT_EXPR. Remove it and/or other conversions
underneath as appropriate. */
static enum gimplify_status
gimplify_conversion (tree *expr_p)
{
  tree tem;
  gcc_assert (TREE_CODE (*expr_p) == NOP_EXPR
	      || TREE_CODE (*expr_p) == CONVERT_EXPR);
  /* Then strip away all but the outermost conversion.  */
  STRIP_SIGN_NOPS (TREE_OPERAND (*expr_p, 0));
  /* And remove the outermost conversion if it's useless.  */
  if (tree_ssa_useless_type_conversion (*expr_p))
    *expr_p = TREE_OPERAND (*expr_p, 0);
  /* Attempt to avoid NOP_EXPR by producing reference to a subtype.
     For example this fold (subclass *)&A into &A->subclass avoiding
     a need for statement.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR
      && POINTER_TYPE_P (TREE_TYPE (*expr_p))
      && POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (*expr_p, 0)))
      && (tem = maybe_fold_offset_to_reference
	  (TREE_OPERAND (*expr_p, 0),
	   integer_zero_node, TREE_TYPE (TREE_TYPE (*expr_p)))))
    {
      /* Only take the address of the folded reference if the
	 resulting pointer type trivially matches the conversion's
	 type; otherwise keep the conversion as written.  */
      tree ptr_type = build_pointer_type (TREE_TYPE (tem));
      if (useless_type_conversion_p (TREE_TYPE (*expr_p), ptr_type))
	*expr_p = build_fold_addr_expr_with_type (tem, ptr_type);
    }
  /* If we still have a conversion at the toplevel,
     then canonicalize some constructs.  */
  if (TREE_CODE (*expr_p) == NOP_EXPR || TREE_CODE (*expr_p) == CONVERT_EXPR)
    {
      tree sub = TREE_OPERAND (*expr_p, 0);
      /* If a NOP conversion is changing the type of a COMPONENT_REF
	 expression, then canonicalize its type now in order to expose more
	 redundant conversions.  */
      if (TREE_CODE (sub) == COMPONENT_REF)
	canonicalize_component_ref (&TREE_OPERAND (*expr_p, 0));
      /* If a NOP conversion is changing a pointer to array of foo
	 to a pointer to foo, embed that change in the ADDR_EXPR.  */
      else if (TREE_CODE (sub) == ADDR_EXPR)
	canonicalize_addr_expr (expr_p);
    }
  return GS_OK;
}
/* Gimplify a VAR_DECL or PARM_DECL. Returns GS_OK if we expanded a
DECL_VALUE_EXPR, and it's worth re-examining things. */
static enum gimplify_status
gimplify_var_or_parm_decl (tree *expr_p)
{
  tree decl = *expr_p;
  /* ??? A local variable never seen in any enclosing BIND_EXPR is
     almost certainly the result of a duplicate declaration, for which
     an error has already been issued.  It would be really nice if the
     front end wouldn't leak these at all; currently the only known
     culprit is C++ destructors, as seen in
     g++.old-deja/g++.jason/binding.C.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_SEEN_IN_BIND_EXPR_P (decl)
      && !TREE_STATIC (decl)
      && !DECL_EXTERNAL (decl)
      && decl_function_context (decl) == current_function_decl)
    {
      gcc_assert (errorcount || sorrycount);
      return GS_ERROR;
    }
  /* Inside an OpenMP context, record this use of the variable.  */
  if (gimplify_omp_ctxp
      && omp_notice_variable (gimplify_omp_ctxp, decl, true))
    return GS_ALL_DONE;
  /* A decl that aliases another expression is replaced by an unshared
     copy of that expression; the caller should re-examine the result.  */
  if (DECL_HAS_VALUE_EXPR_P (decl))
    {
      *expr_p = unshare_expr (DECL_VALUE_EXPR (decl));
      return GS_OK;
    }
  return GS_ALL_DONE;
}
/* Gimplify the COMPONENT_REF, ARRAY_REF, REALPART_EXPR or IMAGPART_EXPR
node pointed to by EXPR_P.
compound_lval
: min_lval '[' val ']'
| min_lval '.' ID
| compound_lval '[' val ']'
| compound_lval '.' ID
This is not part of the original SIMPLE definition, which separates
array and member references, but it seems reasonable to handle them
together. Also, this way we don't run into problems with union
aliasing; gcc requires that for accesses through a union to alias, the
union reference must be explicit, which was not always the case when we
were splitting up array and member refs.
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_compound_lval (tree *expr_p, tree *pre_p,
			tree *post_p, fallback_t fallback)
{
  tree *p;
  VEC(tree,heap) *stack;
  enum gimplify_status ret = GS_OK, tret;
  int i;
  /* Create a stack of the subexpressions so later we can walk them in
     order from inner to outer.  */
  stack = VEC_alloc (tree, heap, 10);
  /* We can handle anything that get_inner_reference can deal with.  */
  for (p = expr_p; ; p = &TREE_OPERAND (*p, 0))
    {
    restart:
      /* Fold INDIRECT_REFs now to turn them into ARRAY_REFs.  */
      if (TREE_CODE (*p) == INDIRECT_REF)
	*p = fold_indirect_ref (*p);
      if (handled_component_p (*p))
	;
      /* Expand DECL_VALUE_EXPR now.  In some cases that may expose
	 additional COMPONENT_REFs.  */
      else if ((TREE_CODE (*p) == VAR_DECL || TREE_CODE (*p) == PARM_DECL)
	       && gimplify_var_or_parm_decl (p) == GS_OK)
	goto restart;
      else
	break;
      VEC_safe_push (tree, heap, stack, *p);
    }
  gcc_assert (VEC_length (tree, stack));
  /* Now STACK is a stack of pointers to all the refs we've walked through
     and P points to the innermost expression.
     Java requires that we elaborated nodes in source order.  That
     means we must gimplify the inner expression followed by each of
     the indices, in order.  But we can't gimplify the inner
     expression until we deal with any variable bounds, sizes, or
     positions in order to deal with PLACEHOLDER_EXPRs.
     So we do this in three steps.  First we deal with the annotations
     for any variables in the components, then we gimplify the base,
     then we gimplify any indices, from left to right.  */
  for (i = VEC_length (tree, stack) - 1; i >= 0; i--)
    {
      tree t = VEC_index (tree, stack, i);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the low bound and element type size and put them into
	     the ARRAY_REF.  If these values are set, they have already been
	     gimplified.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree low = unshare_expr (array_ref_low_bound (t));
	      /* A constant low bound stays implicit (operand 2 left
		 NULL); only variable bounds need gimplifying.  */
	      if (!is_gimple_min_invariant (low))
		{
		  TREE_OPERAND (t, 2) = low;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	  if (!TREE_OPERAND (t, 3))
	    {
	      tree elmt_type = TREE_TYPE (TREE_TYPE (TREE_OPERAND (t, 0)));
	      tree elmt_size = unshare_expr (array_ref_element_size (t));
	      tree factor = size_int (TYPE_ALIGN_UNIT (elmt_type));
	      /* Divide the element size by the alignment of the element
		 type (above).  */
	      elmt_size = size_binop (EXACT_DIV_EXPR, elmt_size, factor);
	      if (!is_gimple_min_invariant (elmt_size))
		{
		  TREE_OPERAND (t, 3) = elmt_size;
		  tret = gimplify_expr (&TREE_OPERAND (t, 3), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
      else if (TREE_CODE (t) == COMPONENT_REF)
	{
	  /* Set the field offset into T and gimplify it.  */
	  if (!TREE_OPERAND (t, 2))
	    {
	      tree offset = unshare_expr (component_ref_field_offset (t));
	      tree field = TREE_OPERAND (t, 1);
	      tree factor
		= size_int (DECL_OFFSET_ALIGN (field) / BITS_PER_UNIT);
	      /* Divide the offset by its alignment.  */
	      offset = size_binop (EXACT_DIV_EXPR, offset, factor);
	      if (!is_gimple_min_invariant (offset))
		{
		  TREE_OPERAND (t, 2) = offset;
		  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
					is_gimple_formal_tmp_reg, fb_rvalue);
		  ret = MIN (ret, tret);
		}
	    }
	}
    }
  /* Step 2 is to gimplify the base expression.  Make sure lvalue is set
     so as to match the min_lval predicate.  Failure to do so may result
     in the creation of large aggregate temporaries.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval,
			fallback | fb_lvalue);
  ret = MIN (ret, tret);
  /* And finally, the indices and operands to BIT_FIELD_REF.  During this
     loop we also remove any useless conversions.  */
  for (; VEC_length (tree, stack) > 0; )
    {
      tree t = VEC_pop (tree, stack);
      if (TREE_CODE (t) == ARRAY_REF || TREE_CODE (t) == ARRAY_RANGE_REF)
	{
	  /* Gimplify the dimension.
	     Temporary fix for gcc.c-torture/execute/20040313-1.c.
	     Gimplify non-constant array indices into a temporary
	     variable.
	     FIXME - The real fix is to gimplify post-modify
	     expressions into a minimal gimple lvalue.  However, that
	     exposes bugs in alias analysis.  The alias analyzer does
	     not handle &PTR->FIELD very well.  Will fix after the
	     branch is merged into mainline (dnovillo 2004-05-03).  */
	  if (!is_gimple_min_invariant (TREE_OPERAND (t, 1)))
	    {
	      tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				    is_gimple_formal_tmp_reg, fb_rvalue);
	      ret = MIN (ret, tret);
	    }
	}
      else if (TREE_CODE (t) == BIT_FIELD_REF)
	{
	  /* The size (operand 1) and position (operand 2) of the
	     bit-field reference just need to be gimple values.  */
	  tret = gimplify_expr (&TREE_OPERAND (t, 1), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	  tret = gimplify_expr (&TREE_OPERAND (t, 2), pre_p, post_p,
				is_gimple_val, fb_rvalue);
	  ret = MIN (ret, tret);
	}
      STRIP_USELESS_TYPE_CONVERSION (TREE_OPERAND (t, 0));
      /* The innermost expression P may have originally had TREE_SIDE_EFFECTS
	 set which would have caused all the outer expressions in EXPR_P
	 leading to P to also have had TREE_SIDE_EFFECTS set.  */
      recalculate_side_effects (t);
    }
  /* Re-gimplify the base with the caller's own fallback now that the
     whole reference is in gimple form.  */
  tret = gimplify_expr (p, pre_p, post_p, is_gimple_min_lval, fallback);
  ret = MIN (ret, tret);
  /* If the outermost expression is a COMPONENT_REF, canonicalize its type.  */
  if ((fallback & fb_rvalue) && TREE_CODE (*expr_p) == COMPONENT_REF)
    {
      canonicalize_component_ref (expr_p);
      ret = MIN (ret, GS_OK);
    }
  VEC_free (tree, heap, stack);
  return ret;
}
/* Gimplify the self modifying expression pointed to by EXPR_P
(++, --, +=, -=).
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored.
POST_P points to the list where side effects that must happen after
*EXPR_P should be stored.
WANT_VALUE is nonzero iff we want to use the value of this expression
in another expression. */
static enum gimplify_status
gimplify_self_mod_expr (tree *expr_p, tree *pre_p, tree *post_p,
			bool want_value)
{
  enum tree_code code;
  tree lhs, lvalue, rhs, t1, post = NULL, *orig_post_p = post_p;
  bool postfix;
  enum tree_code arith_code;
  enum gimplify_status ret;
  code = TREE_CODE (*expr_p);
  gcc_assert (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR
	      || code == PREINCREMENT_EXPR || code == PREDECREMENT_EXPR);
  /* Prefix or postfix?  */
  if (code == POSTINCREMENT_EXPR || code == POSTDECREMENT_EXPR)
    /* Faster to treat as prefix if result is not used.  */
    postfix = want_value;
  else
    postfix = false;
  /* For postfix, make sure the inner expression's post side effects
     are executed after side effects from this expression.  */
  if (postfix)
    post_p = &post;
  /* Add or subtract?  */
  if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
    arith_code = PLUS_EXPR;
  else
    arith_code = MINUS_EXPR;
  /* Gimplify the LHS into a GIMPLE lvalue.  */
  lvalue = TREE_OPERAND (*expr_p, 0);
  ret = gimplify_expr (&lvalue, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;
  /* Extract the operands to the arithmetic operation.  */
  lhs = lvalue;
  rhs = TREE_OPERAND (*expr_p, 1);
  /* For postfix operator, we evaluate the LHS to an rvalue and then use
     that as the result value and in the postqueue operation.  */
  if (postfix)
    {
      ret = gimplify_expr (&lhs, pre_p, post_p, is_gimple_val, fb_rvalue);
      if (ret == GS_ERROR)
	return ret;
    }
  /* For POINTERs increment, use POINTER_PLUS_EXPR.  */
  if (POINTER_TYPE_P (TREE_TYPE (lhs)))
    {
      /* POINTER_PLUS_EXPR takes a sizetype offset; express a
	 decrement as adding the negated offset.  */
      rhs = fold_convert (sizetype, rhs);
      if (arith_code == MINUS_EXPR)
	rhs = fold_build1 (NEGATE_EXPR, TREE_TYPE (rhs), rhs);
      arith_code = POINTER_PLUS_EXPR;
    }
  t1 = build2 (arith_code, TREE_TYPE (*expr_p), lhs, rhs);
  t1 = build_gimple_modify_stmt (lvalue, t1);
  if (postfix)
    {
      /* The update goes on the post queue; the saved rvalue of the
	 LHS becomes the expression's result.  */
      gimplify_and_add (t1, orig_post_p);
      append_to_statement_list (post, orig_post_p);
      *expr_p = lhs;
      return GS_ALL_DONE;
    }
  else
    {
      *expr_p = t1;
      return GS_OK;
    }
}
/* If *EXPR_P has a variable sized type, wrap it in a WITH_SIZE_EXPR. */
static void
maybe_with_size_expr (tree *expr_p)
{
  /* Wrap *EXPR_P in a WITH_SIZE_EXPR recording its (non-constant)
     size in bytes.  No-op if it is already wrapped, if the type is
     erroneous, or if the size is unknown or a compile-time constant.  */
  tree expr = *expr_p;
  tree type = TREE_TYPE (expr);
  tree size_unit;
  if (TREE_CODE (expr) == WITH_SIZE_EXPR || type == error_mark_node)
    return;
  size_unit = TYPE_SIZE_UNIT (type);
  if (size_unit == NULL_TREE || TREE_CODE (size_unit) == INTEGER_CST)
    return;
  /* The size expression may refer to the object itself through
     PLACEHOLDER_EXPRs; resolve those against EXPR on an unshared
     copy before attaching it.  */
  size_unit = SUBSTITUTE_PLACEHOLDER_IN_EXPR (unshare_expr (size_unit), expr);
  *expr_p = build2 (WITH_SIZE_EXPR, type, expr, size_unit);
}
/* Subroutine of gimplify_call_expr: Gimplify a single argument. */
static enum gimplify_status
gimplify_arg (tree *expr_p, tree *pre_p)
{
  /* Gimplify one call argument at *EXPR_P, queuing side effects on
     PRE_P.  */
  bool (*predicate) (tree);
  fallback_t fallback;
  /* Register-sized values are pulled into temporaries to keep the
     optimizers happy.  Larger aggregates may be passed as lvalues:
     copying them out of even larger aggregates into a temporary just
     to copy the temporary into the argument list would only add
     overhead.  */
  if (is_gimple_reg_type (TREE_TYPE (*expr_p)))
    {
      predicate = is_gimple_val;
      fallback = fb_rvalue;
    }
  else
    {
      predicate = is_gimple_lvalue;
      fallback = fb_either;
    }
  /* A variable-sized argument must carry its size along.  */
  maybe_with_size_expr (expr_p);
  /* There is a sequence point before a function call, so side effects
     in the argument list must occur before the actual call: make
     gimplify_expr use an internal post queue, which is then appended
     to the end of PRE_P.  */
  return gimplify_expr (expr_p, pre_p, NULL, predicate, fallback);
}
/* Gimplify the CALL_EXPR node pointed to by EXPR_P. PRE_P points to the
list where side effects that must happen before *EXPR_P should be stored.
WANT_VALUE is true if the result of the call is desired. */
static enum gimplify_status
gimplify_call_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree decl, parms, p;
  enum gimplify_status ret;
  int i, nargs;
  gcc_assert (TREE_CODE (*expr_p) == CALL_EXPR);
  /* For reliable diagnostics during inlining, it is necessary that
     every call_expr be annotated with file and line.  */
  if (! EXPR_HAS_LOCATION (*expr_p))
    SET_EXPR_LOCATION (*expr_p, input_location);
  /* This may be a call to a builtin function.
     Builtin function calls may be transformed into different
     (and more efficient) builtin function calls under certain
     circumstances.  Unfortunately, gimplification can muck things
     up enough that the builtin expanders are not aware that certain
     transformations are still valid.
     So we attempt transformation/gimplification of the call before
     we gimplify the CALL_EXPR.  At this time we do not manage to
     transform all calls in the same manner as the expanders do, but
     we do transform most of them.  */
  decl = get_callee_fndecl (*expr_p);
  if (decl && DECL_BUILT_IN (decl))
    {
      tree new = fold_call_expr (*expr_p, !want_value);
      if (new && new != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new;
	  return GS_OK;
	}
      /* va_start needs special handling: its second argument must stay
	 the raw PARM_DECL, so do not gimplify it.  */
      if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (decl) == BUILT_IN_VA_START)
        {
	  if (call_expr_nargs (*expr_p) < 2)
	    {
	      error ("too few arguments to function %<va_start%>");
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	  if (fold_builtin_next_arg (*expr_p, true))
	    {
	      *expr_p = build_empty_stmt ();
	      return GS_OK;
	    }
	  /* Avoid gimplifying the second argument to va_start, which needs
	     to be the plain PARM_DECL.  */
	  return gimplify_arg (&CALL_EXPR_ARG (*expr_p, 0), pre_p);
	}
    }
  /* There is a sequence point before the call, so any side effects in
     the calling expression must occur before the actual call.  Force
     gimplify_expr to use an internal post queue.  */
  ret = gimplify_expr (&CALL_EXPR_FN (*expr_p), pre_p, NULL,
		       is_gimple_call_addr, fb_rvalue);
  nargs = call_expr_nargs (*expr_p);
  /* Get argument types for verification.  */
  decl = get_callee_fndecl (*expr_p);
  parms = NULL_TREE;
  if (decl)
    parms = TYPE_ARG_TYPES (TREE_TYPE (decl));
  else if (POINTER_TYPE_P (TREE_TYPE (CALL_EXPR_FN (*expr_p))))
    parms = TYPE_ARG_TYPES (TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (*expr_p))));
  /* Verify if the type of the argument matches that of the function
     declaration.  If we cannot verify this or there is a mismatch,
     mark the call expression so it doesn't get inlined later.  */
  if (decl && DECL_ARGUMENTS (decl))
    {
      /* Check each actual argument against the corresponding
	 PARM_DECL of the callee definition.  */
      for (i = 0, p = DECL_ARGUMENTS (decl); i < nargs;
	   i++, p = TREE_CHAIN (p))
	{
	  /* We cannot distinguish a varargs function from the case
	     of excess parameters, still deferring the inlining decision
	     to the callee is possible.  */
	  if (!p)
	    break;
	  if (p == error_mark_node
	      || CALL_EXPR_ARG (*expr_p, i) == error_mark_node
	      || !fold_convertible_p (DECL_ARG_TYPE (p),
				      CALL_EXPR_ARG (*expr_p, i)))
	    {
	      CALL_CANNOT_INLINE_P (*expr_p) = 1;
	      break;
	    }
	}
    }
  else if (parms)
    {
      /* No definition available: check against the prototype's
	 TYPE_ARG_TYPES list instead.  */
      for (i = 0, p = parms; i < nargs; i++, p = TREE_CHAIN (p))
	{
	  /* If this is a varargs function defer inlining decision
	     to callee.  */
	  if (!p)
	    break;
	  if (TREE_VALUE (p) == error_mark_node
	      || CALL_EXPR_ARG (*expr_p, i) == error_mark_node
	      || TREE_CODE (TREE_VALUE (p)) == VOID_TYPE
	      || !fold_convertible_p (TREE_VALUE (p),
				      CALL_EXPR_ARG (*expr_p, i)))
	    {
	      CALL_CANNOT_INLINE_P (*expr_p) = 1;
	      break;
	    }
	}
    }
  else
    {
      /* Neither definition nor prototype: an unprototyped call with
	 arguments cannot be safely inlined.  */
      if (nargs != 0)
	CALL_CANNOT_INLINE_P (*expr_p) = 1;
      i = 0;
      p = NULL_TREE;
    }
  /* If the last argument is __builtin_va_arg_pack () and it is not
     passed as a named argument, decrease the number of CALL_EXPR
     arguments and set instead the CALL_EXPR_VA_ARG_PACK flag.  */
  if (!p
      && i < nargs
      && TREE_CODE (CALL_EXPR_ARG (*expr_p, nargs - 1)) == CALL_EXPR)
    {
      tree last_arg = CALL_EXPR_ARG (*expr_p, nargs - 1);
      tree last_arg_fndecl = get_callee_fndecl (last_arg);
      if (last_arg_fndecl
	  && TREE_CODE (last_arg_fndecl) == FUNCTION_DECL
	  && DECL_BUILT_IN_CLASS (last_arg_fndecl) == BUILT_IN_NORMAL
	  && DECL_FUNCTION_CODE (last_arg_fndecl) == BUILT_IN_VA_ARG_PACK)
	{
	  /* Rebuild the CALL_EXPR without the trailing va_arg_pack
	     argument; the flag below records that it was there.  */
	  tree call = *expr_p;
	  --nargs;
	  *expr_p = build_call_array (TREE_TYPE (call), CALL_EXPR_FN (call),
				      nargs, CALL_EXPR_ARGP (call));
	  /* Copy all CALL_EXPR flags, locus and block, except
	     CALL_EXPR_VA_ARG_PACK flag.  */
	  CALL_EXPR_STATIC_CHAIN (*expr_p) = CALL_EXPR_STATIC_CHAIN (call);
	  CALL_EXPR_TAILCALL (*expr_p) = CALL_EXPR_TAILCALL (call);
	  CALL_EXPR_RETURN_SLOT_OPT (*expr_p)
	    = CALL_EXPR_RETURN_SLOT_OPT (call);
	  CALL_FROM_THUNK_P (*expr_p) = CALL_FROM_THUNK_P (call);
	  CALL_CANNOT_INLINE_P (*expr_p)
	    = CALL_CANNOT_INLINE_P (call);
	  TREE_NOTHROW (*expr_p) = TREE_NOTHROW (call);
	  SET_EXPR_LOCUS (*expr_p, EXPR_LOCUS (call));
	  TREE_BLOCK (*expr_p) = TREE_BLOCK (call);
	  /* Set CALL_EXPR_VA_ARG_PACK.  */
	  CALL_EXPR_VA_ARG_PACK (*expr_p) = 1;
	}
    }
  /* Finally, gimplify the function arguments.  Walk in the target's
     push order so side effects are queued consistently with argument
     evaluation at expansion time.  */
  for (i = (PUSH_ARGS_REVERSED ? nargs - 1 : 0);
       PUSH_ARGS_REVERSED ? i >= 0 : i < nargs;
       PUSH_ARGS_REVERSED ? i-- : i++)
    {
      enum gimplify_status t;
      t = gimplify_arg (&CALL_EXPR_ARG (*expr_p, i), pre_p);
      if (t == GS_ERROR)
	ret = GS_ERROR;
    }
  /* Try this again in case gimplification exposed something.  */
  if (ret != GS_ERROR)
    {
      tree new = fold_call_expr (*expr_p, !want_value);
      if (new && new != *expr_p)
	{
	  /* There was a transformation of this call which computes the
	     same value, but in a more efficient way.  Return and try
	     again.  */
	  *expr_p = new;
	  return GS_OK;
	}
    }
  /* If the function is "const" or "pure", then clear TREE_SIDE_EFFECTS on its
     decl.  This allows us to eliminate redundant or useless
     calls to "const" functions.  */
  if (TREE_CODE (*expr_p) == CALL_EXPR
      && (call_expr_flags (*expr_p) & (ECF_CONST | ECF_PURE)))
    TREE_SIDE_EFFECTS (*expr_p) = 0;
  return ret;
}
/* Handle shortcut semantics in the predicate operand of a COND_EXPR by
rewriting it into multiple COND_EXPRs, and possibly GOTO_EXPRs.
TRUE_LABEL_P and FALSE_LABEL_P point to the labels to jump to if the
condition is true or false, respectively. If null, we should generate
our own to skip over the evaluation of this specific expression.
This function is the tree equivalent of do_jump.
shortcut_cond_r should only be called by shortcut_cond_expr. */
static tree
shortcut_cond_r (tree pred, tree *true_label_p, tree *false_label_p)
{
  tree local_label = NULL_TREE;
  tree t, expr = NULL;

  /* OK, it's not a simple case; we need to pull apart the COND_EXPR to
     retain the shortcut semantics.  Just insert the gotos here;
     shortcut_cond_expr will append the real blocks later.  */
  if (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
    {
      /* Turn if (a && b) into
	   if (a); else goto no;
	   if (b) goto yes; else goto no;
	   (no:) */
      if (false_label_p == NULL)
	false_label_p = &local_label;

      /* Left operand: fall through on true, jump to "no" on false.
	 Passing NULL for true_label_p means "fall through".  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), NULL, false_label_p);
      append_to_statement_list (t, &expr);

      /* Right operand decides the value of the whole conjunction.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
    {
      /* Turn if (a || b) into
	   if (a) goto yes;
	   if (b) goto yes; else goto no;
	   (yes:) */
      if (true_label_p == NULL)
	true_label_p = &local_label;

      /* Left operand: jump to "yes" on true, fall through on false.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 0), true_label_p, NULL);
      append_to_statement_list (t, &expr);

      /* Right operand decides the value of the whole disjunction.  */
      t = shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
			   false_label_p);
      append_to_statement_list (t, &expr);
    }
  else if (TREE_CODE (pred) == COND_EXPR)
    {
      /* As long as we're messing with gotos, turn if (a ? b : c) into
	 if (a)
	   if (b) goto yes; else goto no;
	 else
	   if (c) goto yes; else goto no; */
      expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (pred, 0),
		     shortcut_cond_r (TREE_OPERAND (pred, 1), true_label_p,
				      false_label_p),
		     shortcut_cond_r (TREE_OPERAND (pred, 2), true_label_p,
				      false_label_p));
    }
  else
    {
      /* A leaf predicate: test it and jump to the requested labels.
	 build_and_jump creates a fresh label if the pointee is null.  */
      expr = build3 (COND_EXPR, void_type_node, pred,
		     build_and_jump (true_label_p),
		     build_and_jump (false_label_p));
    }

  /* If a fall-through label was created above for the short-circuit
     case, emit it after the generated statements.  */
  if (local_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, local_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* Rewrite the COND_EXPR EXPR so that its TRUTH_ANDIF_EXPR/TRUTH_ORIF_EXPR
   predicate is expressed as explicit tests and gotos, preserving the
   short-circuit semantics.  Returns the replacement expression.  */

static tree
shortcut_cond_expr (tree expr)
{
  tree pred = TREE_OPERAND (expr, 0);
  tree then_ = TREE_OPERAND (expr, 1);
  tree else_ = TREE_OPERAND (expr, 2);
  tree true_label, false_label, end_label, t;
  tree *true_label_p;
  tree *false_label_p;
  bool emit_end, emit_false, jump_over_else;
  bool then_se = then_ && TREE_SIDE_EFFECTS (then_);
  bool else_se = else_ && TREE_SIDE_EFFECTS (else_);

  /* First do simple transformations.  */
  if (!else_se)
    {
      /* If there is no 'else', turn (a && b) into if (a) if (b).  */
      while (TREE_CODE (pred) == TRUTH_ANDIF_EXPR)
	{
	  /* Reuse EXPR as the inner "if (b) then_" by replacing its
	     predicate, then wrap it in a new outer "if (a)".  */
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  then_ = shortcut_cond_expr (expr);
	  then_se = then_ && TREE_SIDE_EFFECTS (then_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, then_, NULL_TREE);
	}
    }
  if (!then_se)
    {
      /* If there is no 'then', turn
	   if (a || b); else d
	 into
	   if (a); else if (b); else d.  */
      while (TREE_CODE (pred) == TRUTH_ORIF_EXPR)
	{
	  /* Mirror image of the TRUTH_ANDIF_EXPR loop above.  */
	  TREE_OPERAND (expr, 0) = TREE_OPERAND (pred, 1);
	  else_ = shortcut_cond_expr (expr);
	  else_se = else_ && TREE_SIDE_EFFECTS (else_);
	  pred = TREE_OPERAND (pred, 0);
	  expr = build3 (COND_EXPR, void_type_node, pred, NULL_TREE, else_);
	}
    }

  /* If we're done, great.  */
  if (TREE_CODE (pred) != TRUTH_ANDIF_EXPR
      && TREE_CODE (pred) != TRUTH_ORIF_EXPR)
    return expr;

  /* Otherwise we need to mess with gotos.  Change
       if (a) c; else d;
     to
       if (a); else goto no;
       c; goto end;
       no: d; end:
     and recursively gimplify the condition.  */

  true_label = false_label = end_label = NULL_TREE;

  /* If our arms just jump somewhere, hijack those labels so we don't
     generate jumps to jumps.  */
  if (then_
      && TREE_CODE (then_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (then_)) == LABEL_DECL)
    {
      true_label = GOTO_DESTINATION (then_);
      then_ = NULL;
      then_se = false;
    }
  if (else_
      && TREE_CODE (else_) == GOTO_EXPR
      && TREE_CODE (GOTO_DESTINATION (else_)) == LABEL_DECL)
    {
      false_label = GOTO_DESTINATION (else_);
      else_ = NULL;
      else_se = false;
    }

  /* If we aren't hijacking a label for the 'then' branch, it falls through.  */
  if (true_label)
    true_label_p = &true_label;
  else
    true_label_p = NULL;

  /* The 'else' branch also needs a label if it contains interesting code.  */
  if (false_label || else_se)
    false_label_p = &false_label;
  else
    false_label_p = NULL;

  /* If there was nothing else in our arms, just forward the label(s).  */
  if (!then_se && !else_se)
    return shortcut_cond_r (pred, true_label_p, false_label_p);

  /* If our last subexpression already has a terminal label, reuse it.  */
  if (else_se)
    expr = expr_last (else_);
  else if (then_se)
    expr = expr_last (then_);
  else
    expr = NULL;
  if (expr && TREE_CODE (expr) == LABEL_EXPR)
    end_label = LABEL_EXPR_LABEL (expr);

  /* If we don't care about jumping to the 'else' branch, jump to the end
     if the condition is false.  */
  if (!false_label_p)
    false_label_p = &end_label;

  /* We only want to emit these labels if we aren't hijacking them.  */
  emit_end = (end_label == NULL_TREE);
  emit_false = (false_label == NULL_TREE);

  /* We only emit the jump over the else clause if we have to--if the
     then clause may fall through.  Otherwise we can wind up with a
     useless jump and a useless label at the end of gimplified code,
     which will cause us to think that this conditional as a whole
     falls through even if it doesn't.  If we then inline a function
     which ends with such a condition, that can cause us to issue an
     inappropriate warning about control reaching the end of a
     non-void function.  */
  jump_over_else = block_may_fallthru (then_);

  /* Expand the predicate; shortcut_cond_r fills in any labels it needs
     through TRUE_LABEL_P/FALSE_LABEL_P.  */
  pred = shortcut_cond_r (pred, true_label_p, false_label_p);

  expr = NULL;
  append_to_statement_list (pred, &expr);

  append_to_statement_list (then_, &expr);
  if (else_se)
    {
      if (jump_over_else)
	{
	  t = build_and_jump (&end_label);
	  append_to_statement_list (t, &expr);
	}
      if (emit_false)
	{
	  t = build1 (LABEL_EXPR, void_type_node, false_label);
	  append_to_statement_list (t, &expr);
	}
      append_to_statement_list (else_, &expr);
    }
  if (emit_end && end_label)
    {
      t = build1 (LABEL_EXPR, void_type_node, end_label);
      append_to_statement_list (t, &expr);
    }

  return expr;
}
/* EXPR is used in a boolean context; make sure it has BOOLEAN_TYPE. */
tree
gimple_boolify (tree expr)
{
  /* Nothing to do if EXPR already has the right type.  */
  if (TREE_CODE (TREE_TYPE (expr)) == BOOLEAN_TYPE)
    return expr;

  switch (TREE_CODE (expr))
    {
    case TRUTH_AND_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_XOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      /* Binary truth operators want boolean operands as well as a
	 boolean result, so boolify both operands and retype in place.  */
      TREE_OPERAND (expr, 1) = gimple_boolify (TREE_OPERAND (expr, 1));
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    case TRUTH_NOT_EXPR:
      /* Logical negation has a single operand to boolify.  */
      TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    case EQ_EXPR: case NE_EXPR:
    case LE_EXPR: case GE_EXPR: case LT_EXPR: case GT_EXPR:
      /* Comparisons always produce boolean results; just retype.  */
      TREE_TYPE (expr) = boolean_type_node;
      return expr;

    default:
      /* Anything else is already a boolean value in the wrong type;
	 wrap it in a conversion.  */
      return fold_convert (boolean_type_node, expr);
    }
}
/* Given a conditional expression *EXPR_P without side effects, gimplify
its operands. New statements are inserted to PRE_P. */
static enum gimplify_status
gimplify_pure_cond_expr (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;
  tree pred = gimple_boolify (COND_EXPR_COND (expr));
  enum gimplify_status status, arm_status;

  /* Demote the short-circuit operators to their non-shortcut
     counterparts: gimplifying && or || would produce another pure
     COND_EXPR and send us into an infinite cycle.  */
  switch (TREE_CODE (pred))
    {
    case TRUTH_ANDIF_EXPR:
      TREE_SET_CODE (pred, TRUTH_AND_EXPR);
      break;
    case TRUTH_ORIF_EXPR:
      TREE_SET_CODE (pred, TRUTH_OR_EXPR);
      break;
    default:
      break;
    }

  /* Gimplify the condition and both arms, accumulating the weakest
     status seen.  */
  status = gimplify_expr (&pred, pre_p, NULL,
			  is_gimple_condexpr, fb_rvalue);
  COND_EXPR_COND (*expr_p) = pred;

  arm_status = gimplify_expr (&COND_EXPR_THEN (expr), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
  if (arm_status < status)
    status = arm_status;

  arm_status = gimplify_expr (&COND_EXPR_ELSE (expr), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
  return arm_status < status ? arm_status : status;
}
/* Returns true if evaluating EXPR could trap.
EXPR is GENERIC, while tree_could_trap_p can be called
only on GIMPLE. */
static bool
generic_expr_could_trap_p (tree expr)
{
  unsigned idx, num_ops;

  /* A null tree or any GIMPLE value cannot trap.  */
  if (expr == NULL_TREE || is_gimple_val (expr))
    return false;

  /* Conservatively treat non-expression nodes as trapping, as well as
     anything tree_could_trap_p flags.  */
  if (!EXPR_P (expr) || tree_could_trap_p (expr))
    return true;

  /* Otherwise EXPR traps iff one of its operands could.  */
  num_ops = TREE_OPERAND_LENGTH (expr);
  for (idx = 0; idx < num_ops; idx++)
    if (generic_expr_could_trap_p (TREE_OPERAND (expr, idx)))
      return true;

  return false;
}
/* Convert the conditional expression pointed to by EXPR_P '(p) ? a : b;'
into
if (p) if (p)
t1 = a; a;
else or else
t1 = b; b;
t1;
The second form is used when *EXPR_P is of type void.
TARGET is the tree for T1 above.
PRE_P points to the list where side effects that must happen before
*EXPR_P should be stored. */
static enum gimplify_status
gimplify_cond_expr (tree *expr_p, tree *pre_p, fallback_t fallback)
{
  tree expr = *expr_p;
  tree tmp, tmp2, type;
  enum gimplify_status ret;

  type = TREE_TYPE (expr);

  /* If this COND_EXPR has a value, copy the values into a temporary within
     the arms.  */
  if (! VOID_TYPE_P (type))
    {
      tree result;

      /* If an rvalue is ok or we do not require an lvalue, avoid creating
	 an addressable temporary.  */
      if (((fallback & fb_rvalue)
	   || !(fallback & fb_lvalue))
	  && !TREE_ADDRESSABLE (type))
	{
	  if (gimplify_ctxp->allow_rhs_cond_expr
	      /* If either branch has side effects or could trap, it can't be
		 evaluated unconditionally.  */
	      && !TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 1))
	      && !generic_expr_could_trap_p (TREE_OPERAND (*expr_p, 1))
	      && !TREE_SIDE_EFFECTS (TREE_OPERAND (*expr_p, 2))
	      && !generic_expr_could_trap_p (TREE_OPERAND (*expr_p, 2)))
	    return gimplify_pure_cond_expr (expr_p, pre_p);

	  /* Both arms will assign to the same scalar temporary.  */
	  result = tmp2 = tmp = create_tmp_var (TREE_TYPE (expr), "iftmp");
	  ret = GS_ALL_DONE;
	}
      else
	{
	  /* An lvalue is required (or the type is addressable): take the
	     address of each arm and store it in a pointer temporary, then
	     hand back a dereference of that pointer.  */
	  tree type = build_pointer_type (TREE_TYPE (expr));

	  if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	    TREE_OPERAND (expr, 1) =
	      build_fold_addr_expr (TREE_OPERAND (expr, 1));

	  if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	    TREE_OPERAND (expr, 2) =
	      build_fold_addr_expr (TREE_OPERAND (expr, 2));

	  tmp2 = tmp = create_tmp_var (type, "iftmp");

	  expr = build3 (COND_EXPR, void_type_node, TREE_OPERAND (expr, 0),
			 TREE_OPERAND (expr, 1), TREE_OPERAND (expr, 2));

	  result = build_fold_indirect_ref (tmp);
	  ret = GS_ALL_DONE;
	}

      /* Build the then clause, 't1 = a;'.  But don't build an assignment
	 if this branch is void; in C++ it can be, if it's a throw.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 1)) != void_type_node)
	TREE_OPERAND (expr, 1)
	  = build_gimple_modify_stmt (tmp, TREE_OPERAND (expr, 1));

      /* Build the else clause, 't1 = b;'.  */
      if (TREE_TYPE (TREE_OPERAND (expr, 2)) != void_type_node)
	TREE_OPERAND (expr, 2)
	  = build_gimple_modify_stmt (tmp2, TREE_OPERAND (expr, 2));

      TREE_TYPE (expr) = void_type_node;
      recalculate_side_effects (expr);

      /* Move the COND_EXPR to the prequeue.  */
      gimplify_and_add (expr, pre_p);

      *expr_p = result;
      return ret;
    }

  /* Make sure the condition has BOOLEAN_TYPE.  */
  TREE_OPERAND (expr, 0) = gimple_boolify (TREE_OPERAND (expr, 0));

  /* Break apart && and || conditions.  */
  if (TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ANDIF_EXPR
      || TREE_CODE (TREE_OPERAND (expr, 0)) == TRUTH_ORIF_EXPR)
    {
      expr = shortcut_cond_expr (expr);

      if (expr != *expr_p)
	{
	  *expr_p = expr;

	  /* We can't rely on gimplify_expr to re-gimplify the expanded
	     form properly, as cleanups might cause the target labels to be
	     wrapped in a TRY_FINALLY_EXPR.  To prevent that, we need to
	     set up a conditional context.  */
	  gimple_push_condition ();
	  gimplify_stmt (expr_p);
	  gimple_pop_condition (pre_p);

	  return GS_ALL_DONE;
	}
    }

  /* Now do the normal gimplification.  */
  ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
		       is_gimple_condexpr, fb_rvalue);

  gimple_push_condition ();

  gimplify_to_stmt_list (&TREE_OPERAND (expr, 1));
  gimplify_to_stmt_list (&TREE_OPERAND (expr, 2));
  recalculate_side_effects (expr);

  gimple_pop_condition (pre_p);

  if (ret == GS_ERROR)
    ;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 1)))
    ret = GS_ALL_DONE;
  else if (TREE_SIDE_EFFECTS (TREE_OPERAND (expr, 2)))
    /* Rewrite "if (a); else b" to "if (!a) b"  */
    {
      TREE_OPERAND (expr, 0) = invert_truthvalue (TREE_OPERAND (expr, 0));
      /* Re-gimplify: inverting the truth value may have un-simplified
	 the condition.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, NULL,
			   is_gimple_condexpr, fb_rvalue);

      tmp = TREE_OPERAND (expr, 1);
      TREE_OPERAND (expr, 1) = TREE_OPERAND (expr, 2);
      TREE_OPERAND (expr, 2) = tmp;
    }
  else
    /* Both arms are empty; replace the COND_EXPR with its predicate.  */
    expr = TREE_OPERAND (expr, 0);

  *expr_p = expr;
  return ret;
}
/* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with
a call to __builtin_memcpy. */
static enum gimplify_status
gimplify_modify_expr_to_memcpy (tree *expr_p, tree size, bool want_value)
{
  tree dest = GENERIC_TREE_OPERAND (*expr_p, 0);
  tree src = GENERIC_TREE_OPERAND (*expr_p, 1);
  tree dest_ptr, src_ptr, call;

  /* memcpy (&dest, &src, size).  */
  dest_ptr = build_fold_addr_expr (dest);
  src_ptr = build_fold_addr_expr (src);
  call = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
			  3, dest_ptr, src_ptr, size);

  if (want_value)
    {
      /* The caller wants the value of the assignment: memcpy returns
	 the destination pointer, so cast it back and dereference.  */
      call = build1 (NOP_EXPR, TREE_TYPE (dest_ptr), call);
      call = build1 (INDIRECT_REF, TREE_TYPE (dest), call);
    }

  *expr_p = call;
  return GS_OK;
}
/* A subroutine of gimplify_modify_expr. Replace a MODIFY_EXPR with
a call to __builtin_memset. In this case we know that the RHS is
a CONSTRUCTOR with an empty element list. */
static enum gimplify_status
gimplify_modify_expr_to_memset (tree *expr_p, tree size, bool want_value)
{
  tree dest = GENERIC_TREE_OPERAND (*expr_p, 0);
  tree dest_ptr, call;

  /* memset (&dest, 0, size) — the RHS is known to be an empty
     CONSTRUCTOR, i.e. zero-initialization.  */
  dest_ptr = build_fold_addr_expr (dest);
  call = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMSET],
			  3, dest_ptr, integer_zero_node, size);

  if (want_value)
    {
      /* Recover the assigned value from memset's returned pointer.  */
      call = build1 (NOP_EXPR, TREE_TYPE (dest_ptr), call);
      call = build1 (INDIRECT_REF, TREE_TYPE (dest), call);
    }

  *expr_p = call;
  return GS_OK;
}
/* A subroutine of gimplify_init_ctor_preeval. Called via walk_tree,
determine, cautiously, if a CONSTRUCTOR overlaps the lhs of an
assignment. Returns non-null if we detect a potential overlap. */
/* Data shared between gimplify_init_ctor_preeval and its walk_tree
   callback gimplify_init_ctor_preeval_1.  */

struct gimplify_init_ctor_preeval_data
{
  /* The base decl of the lhs object.  May be NULL, in which case we
     have to assume the lhs is indirect.  */
  tree lhs_base_decl;

  /* The alias set of the lhs object.  */
  alias_set_type lhs_alias_set;
};
static tree
gimplify_init_ctor_preeval_1 (tree *tp, int *walk_subtrees, void *xdata)
{
  struct gimplify_init_ctor_preeval_data *pdata
    = (struct gimplify_init_ctor_preeval_data *) xdata;
  tree node = *tp;

  /* Seeing the lhs base object itself is a certain overlap.  */
  if (node == pdata->lhs_base_decl)
    return node;

  /* An indirect reference may overlap the lhs when the lhs is
     addressable (or its base is unknown) and the alias sets conflict;
     addressability and alias sets are the only information we have at
     this point.  */
  if (TREE_CODE (node) == INDIRECT_REF
      && (!pdata->lhs_base_decl || TREE_ADDRESSABLE (pdata->lhs_base_decl))
      && alias_sets_conflict_p (pdata->lhs_alias_set, get_alias_set (node)))
    return node;

  /* A call may hide an overlapping INDIRECT_REF like the above behind
     any pointer argument whose pointed-to type conflicts with the lhs.  */
  if (TREE_CODE (node) == CALL_EXPR)
    {
      tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (node)));
      tree arg_type;

      for (arg_type = TYPE_ARG_TYPES (fntype); arg_type;
	   arg_type = TREE_CHAIN (arg_type))
	if (POINTER_TYPE_P (TREE_VALUE (arg_type))
	    && (!pdata->lhs_base_decl
		|| TREE_ADDRESSABLE (pdata->lhs_base_decl))
	    && alias_sets_conflict_p (pdata->lhs_alias_set,
				      get_alias_set
					(TREE_TYPE (TREE_VALUE (arg_type)))))
	  return node;
    }

  /* There is nothing interesting below a type or a declaration.  */
  if (IS_TYPE_OR_DECL_P (node))
    *walk_subtrees = 0;

  return NULL;
}
/* A subroutine of gimplify_init_constructor. Pre-evaluate *EXPR_P,
force values that overlap with the lhs (as described by *DATA)
into temporaries. */
static void
gimplify_init_ctor_preeval (tree *expr_p, tree *pre_p, tree *post_p,
			    struct gimplify_init_ctor_preeval_data *data)
{
  enum gimplify_status one;

  /* If the value is invariant, then there's nothing to pre-evaluate.
     But ensure it doesn't have any side-effects since a SAVE_EXPR is
     invariant but has side effects and might contain a reference to
     the object we're initializing.  */
  if (TREE_INVARIANT (*expr_p) && !TREE_SIDE_EFFECTS (*expr_p))
    return;

  /* If the type has non-trivial constructors, we can't pre-evaluate.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (*expr_p)))
    return;

  /* Recurse for nested constructors.  */
  if (TREE_CODE (*expr_p) == CONSTRUCTOR)
    {
      unsigned HOST_WIDE_INT ix;
      constructor_elt *ce;
      VEC(constructor_elt,gc) *v = CONSTRUCTOR_ELTS (*expr_p);

      /* Pre-evaluate each element value in place.  */
      for (ix = 0; VEC_iterate (constructor_elt, v, ix, ce); ix++)
	gimplify_init_ctor_preeval (&ce->value, pre_p, post_p, data);
      return;
    }

  /* If this is a variable sized type, we must remember the size.  */
  maybe_with_size_expr (expr_p);

  /* Gimplify the constructor element to something appropriate for the rhs
     of a MODIFY_EXPR.  Given that we know the lhs is an aggregate, we know
     the gimplifier will consider this a store to memory.  Doing this
     gimplification now means that we won't have to deal with complicated
     language-specific trees, nor trees like SAVE_EXPR that can induce
     exponential search behavior.  */
  one = gimplify_expr (expr_p, pre_p, post_p, is_gimple_mem_rhs, fb_rvalue);
  if (one == GS_ERROR)
    {
      /* A NULL value is later skipped by gimplify_init_ctor_eval.  */
      *expr_p = NULL;
      return;
    }

  /* If we gimplified to a bare decl, we can be sure that it doesn't overlap
     with the lhs, since "a = { .x=a }" doesn't make sense.  This will
     always be true for all scalars, since is_gimple_mem_rhs insists on a
     temporary variable for them.  */
  if (DECL_P (*expr_p))
    return;

  /* If this is of variable size, we have no choice but to assume it doesn't
     overlap since we can't make a temporary for it.  */
  if (TREE_CODE (TYPE_SIZE (TREE_TYPE (*expr_p))) != INTEGER_CST)
    return;

  /* Otherwise, we must search for overlap ...  */
  if (!walk_tree (expr_p, gimplify_init_ctor_preeval_1, data, NULL))
    return;

  /* ... and if found, force the value into a temporary.  */
  *expr_p = get_formal_tmp_var (*expr_p, pre_p);
}
/* A subroutine of gimplify_init_ctor_eval. Create a loop for
a RANGE_EXPR in a CONSTRUCTOR for an array.
var = lower;
loop_entry:
object[var] = value;
if (var == upper)
goto loop_exit;
var = var + 1;
goto loop_entry;
loop_exit:
We increment var _after_ the loop exit check because we might otherwise
fail if upper == TYPE_MAX_VALUE (type for upper).
Note that we never have to deal with SAVE_EXPRs here, because this has
already been taken care of for us, in gimplify_init_ctor_preeval(). */
static void gimplify_init_ctor_eval (tree, VEC(constructor_elt,gc) *,
tree *, bool);
static void
gimplify_init_ctor_eval_range (tree object, tree lower, tree upper,
			       tree value, tree array_elt_type,
			       tree *pre_p, bool cleared)
{
  tree loop_entry_label, loop_exit_label;
  tree var, var_type, cref, tmp;

  loop_entry_label = create_artificial_label ();
  loop_exit_label = create_artificial_label ();

  /* Create and initialize the index variable: var = lower.  */
  var_type = TREE_TYPE (upper);
  var = create_tmp_var (var_type, NULL);
  append_to_statement_list (build_gimple_modify_stmt (var, lower), pre_p);

  /* Add the loop entry label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node,
				    loop_entry_label),
			    pre_p);

  /* Build the reference: object[var].  */
  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
		 var, NULL_TREE, NULL_TREE);

  /* If we are a constructor, just call gimplify_init_ctor_eval to do
     the store.  Otherwise just assign value to the reference.  */
  if (TREE_CODE (value) == CONSTRUCTOR)
    /* NB we might have to call ourself recursively through
       gimplify_init_ctor_eval if the value is a constructor.  */
    gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
			     pre_p, cleared);
  else
    append_to_statement_list (build_gimple_modify_stmt (cref, value), pre_p);

  /* We exit the loop when the index var is equal to the upper bound.
     Testing before the increment avoids overflow when upper is
     TYPE_MAX_VALUE of the index type.  */
  gimplify_and_add (build3 (COND_EXPR, void_type_node,
			    build2 (EQ_EXPR, boolean_type_node,
				    var, upper),
			    build1 (GOTO_EXPR,
				    void_type_node,
				    loop_exit_label),
			    NULL_TREE),
		    pre_p);

  /* Otherwise, increment the index var...  */
  tmp = build2 (PLUS_EXPR, var_type, var,
		fold_convert (var_type, integer_one_node));
  append_to_statement_list (build_gimple_modify_stmt (var, tmp), pre_p);

  /* ...and jump back to the loop entry.  */
  append_to_statement_list (build1 (GOTO_EXPR,
				    void_type_node,
				    loop_entry_label),
			    pre_p);

  /* Add the loop exit label.  */
  append_to_statement_list (build1 (LABEL_EXPR,
				    void_type_node,
				    loop_exit_label),
			    pre_p);
}
/* Return true if FDECL is accessing a field that is zero sized. */
static bool
zero_sized_field_decl (const_tree fdecl)
{
  /* A field is zero sized when its DECL_SIZE is present and zero.  */
  return (TREE_CODE (fdecl) == FIELD_DECL
	  && DECL_SIZE (fdecl)
	  && integer_zerop (DECL_SIZE (fdecl)));
}
/* Return true if TYPE is zero sized. */
static bool
zero_sized_type (const_tree type)
{
  /* An aggregate is zero sized when its TYPE_SIZE is present and zero.  */
  return (AGGREGATE_TYPE_P (type)
	  && TYPE_SIZE (type)
	  && integer_zerop (TYPE_SIZE (type)));
}
static void tree_to_gimple_tuple (tree *);
/* A subroutine of gimplify_init_constructor. Generate individual
MODIFY_EXPRs for a CONSTRUCTOR. OBJECT is the LHS against which the
assignments should happen. ELTS is the CONSTRUCTOR_ELTS of the
CONSTRUCTOR. CLEARED is true if the entire LHS object has been
zeroed first. */
static void
gimplify_init_ctor_eval (tree object, VEC(constructor_elt,gc) *elts,
			 tree *pre_p, bool cleared)
{
  tree array_elt_type = NULL;
  unsigned HOST_WIDE_INT ix;
  tree purpose, value;

  /* For arrays, every element store goes through an ARRAY_REF of the
     element type's main variant.  */
  if (TREE_CODE (TREE_TYPE (object)) == ARRAY_TYPE)
    array_elt_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (object)));

  FOR_EACH_CONSTRUCTOR_ELT (elts, ix, purpose, value)
    {
      tree cref, init;

      /* NULL values are created above for gimplification errors.  */
      if (value == NULL)
	continue;

      /* If the object was block-cleared first, zero elements need no
	 explicit store.  */
      if (cleared && initializer_zerop (value))
	continue;

      /* ??? Here's to hoping the front end fills in all of the indices,
	 so we don't have to figure out what's missing ourselves.  */
      gcc_assert (purpose);

      /* Skip zero-sized fields, unless value has side-effects.  This can
	 happen with calls to functions returning a zero-sized type, which
	 we shouldn't discard.  As a number of downstream passes don't
	 expect sets of zero-sized fields, we rely on the gimplification of
	 the MODIFY_EXPR we make below to drop the assignment statement.  */
      if (! TREE_SIDE_EFFECTS (value) && zero_sized_field_decl (purpose))
	continue;

      /* If we have a RANGE_EXPR, we have to build a loop to assign the
	 whole range.  */
      if (TREE_CODE (purpose) == RANGE_EXPR)
	{
	  tree lower = TREE_OPERAND (purpose, 0);
	  tree upper = TREE_OPERAND (purpose, 1);

	  /* If the lower bound is equal to upper, just treat it as if
	     upper was the index.  */
	  if (simple_cst_equal (lower, upper))
	    purpose = upper;
	  else
	    {
	      gimplify_init_ctor_eval_range (object, lower, upper, value,
					     array_elt_type, pre_p, cleared);
	      continue;
	    }
	}

      if (array_elt_type)
	{
	  /* Do not use bitsizetype for ARRAY_REF indices.  */
	  if (TYPE_DOMAIN (TREE_TYPE (object)))
	    purpose = fold_convert (TREE_TYPE (TYPE_DOMAIN (TREE_TYPE (object))),
				    purpose);
	  cref = build4 (ARRAY_REF, array_elt_type, unshare_expr (object),
			 purpose, NULL_TREE, NULL_TREE);
	}
      else
	{
	  /* A record/union element: PURPOSE must be the field decl.  */
	  gcc_assert (TREE_CODE (purpose) == FIELD_DECL);
	  cref = build3 (COMPONENT_REF, TREE_TYPE (purpose),
			 unshare_expr (object), purpose, NULL_TREE);
	}

      /* Nested aggregate constructors recurse; vector constructors are
	 kept whole and assigned directly.  */
      if (TREE_CODE (value) == CONSTRUCTOR
	  && TREE_CODE (TREE_TYPE (value)) != VECTOR_TYPE)
	gimplify_init_ctor_eval (cref, CONSTRUCTOR_ELTS (value),
				 pre_p, cleared);
      else
	{
	  init = build2 (INIT_EXPR, TREE_TYPE (cref), cref, value);
	  gimplify_and_add (init, pre_p);
	}
    }
}
/* A subroutine of gimplify_modify_expr. Break out elements of a
CONSTRUCTOR used as an initializer into separate MODIFY_EXPRs.
Note that we still need to clear any elements that don't have explicit
initializers, so if not all elements are initialized we keep the
original MODIFY_EXPR, we just remove all of the constructor elements.
If NOTIFY_TEMP_CREATION is true, do not gimplify, just return
GS_ERROR if we would have to create a temporary when gimplifying
this constructor. Otherwise, return GS_OK.
If NOTIFY_TEMP_CREATION is false, just do the gimplification. */
static enum gimplify_status
gimplify_init_constructor (tree *expr_p, tree *pre_p,
			   tree *post_p, bool want_value,
			   bool notify_temp_creation)
{
  tree object;
  tree ctor = GENERIC_TREE_OPERAND (*expr_p, 1);
  tree type = TREE_TYPE (ctor);
  enum gimplify_status ret;
  VEC(constructor_elt,gc) *elts;

  if (TREE_CODE (ctor) != CONSTRUCTOR)
    return GS_UNHANDLED;

  if (!notify_temp_creation)
    {
      /* Gimplify the lhs first so we know what OBJECT really is.  */
      ret = gimplify_expr (&GENERIC_TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			   is_gimple_lvalue, fb_lvalue);
      if (ret == GS_ERROR)
	return ret;
    }
  object = GENERIC_TREE_OPERAND (*expr_p, 0);

  elts = CONSTRUCTOR_ELTS (ctor);

  ret = GS_ALL_DONE;
  switch (TREE_CODE (type))
    {
    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
    case ARRAY_TYPE:
      {
	struct gimplify_init_ctor_preeval_data preeval_data;
	HOST_WIDE_INT num_type_elements, num_ctor_elements;
	HOST_WIDE_INT num_nonzero_elements;
	bool cleared, valid_const_initializer;

	/* Aggregate types must lower constructors to initialization of
	   individual elements.  The exception is that a CONSTRUCTOR node
	   with no elements indicates zero-initialization of the whole.  */
	if (VEC_empty (constructor_elt, elts))
	  {
	    if (notify_temp_creation)
	      return GS_OK;
	    break;
	  }

	/* Fetch information about the constructor to direct later processing.
	   We might want to make static versions of it in various cases, and
	   can only do so if it known to be a valid constant initializer.  */
	valid_const_initializer
	  = categorize_ctor_elements (ctor, &num_nonzero_elements,
				      &num_ctor_elements, &cleared);

	/* If a const aggregate variable is being initialized, then it
	   should never be a lose to promote the variable to be static.  */
	if (valid_const_initializer
	    && num_nonzero_elements > 1
	    && TREE_READONLY (object)
	    && TREE_CODE (object) == VAR_DECL
	    && (flag_merge_constants >= 2 || !TREE_ADDRESSABLE (object)))
	  {
	    if (notify_temp_creation)
	      return GS_ERROR;
	    DECL_INITIAL (object) = ctor;
	    TREE_STATIC (object) = 1;
	    if (!DECL_NAME (object))
	      DECL_NAME (object) = create_tmp_var_name ("C");
	    walk_tree (&DECL_INITIAL (object), force_labels_r, NULL, NULL);

	    /* ??? C++ doesn't automatically append a .<number> to the
	       assembler name, and even when it does, it looks a FE private
	       data structures to figure out what that number should be,
	       which are not set for this variable.  I suppose this is
	       important for local statics for inline functions, which aren't
	       "local" in the object file sense.  So in order to get a unique
	       TU-local symbol, we must invoke the lhd version now.  */
	    lhd_set_decl_assembler_name (object);

	    /* The assignment itself is gone; the initializer lives in
	       DECL_INITIAL now.  */
	    *expr_p = NULL_TREE;
	    break;
	  }

	/* If there are "lots" of initialized elements, even discounting
	   those that are not address constants (and thus *must* be
	   computed at runtime), then partition the constructor into
	   constant and non-constant parts.  Block copy the constant
	   parts in, then generate code for the non-constant parts.  */
	/* TODO.  There's code in cp/typeck.c to do this.  */

	num_type_elements = count_type_elements (type, true);

	/* If count_type_elements could not determine number of type elements
	   for a constant-sized object, assume clearing is needed.
	   Don't do this for variable-sized objects, as store_constructor
	   will ignore the clearing of variable-sized objects.  */
	if (num_type_elements < 0 && int_size_in_bytes (type) >= 0)
	  cleared = true;
	/* If there are "lots" of zeros, then block clear the object first.  */
	else if (num_type_elements - num_nonzero_elements > CLEAR_RATIO
		 && num_nonzero_elements < num_type_elements/4)
	  cleared = true;
	/* ??? This bit ought not be needed.  For any element not present
	   in the initializer, we should simply set them to zero.  Except
	   we'd need to *find* the elements that are not present, and that
	   requires trickery to avoid quadratic compile-time behavior in
	   large cases or excessive memory use in small cases.  */
	else if (num_ctor_elements < num_type_elements)
	  cleared = true;

	/* If there are "lots" of initialized elements, and all of them
	   are valid address constants, then the entire initializer can
	   be dropped to memory, and then memcpy'd out.  Don't do this
	   for sparse arrays, though, as it's more efficient to follow
	   the standard CONSTRUCTOR behavior of memset followed by
	   individual element initialization.  */
	if (valid_const_initializer && !cleared)
	  {
	    HOST_WIDE_INT size = int_size_in_bytes (type);
	    unsigned int align;

	    /* ??? We can still get unbounded array types, at least
	       from the C++ front end.  This seems wrong, but attempt
	       to work around it for now.  */
	    if (size < 0)
	      {
		size = int_size_in_bytes (TREE_TYPE (object));
		if (size >= 0)
		  TREE_TYPE (ctor) = type = TREE_TYPE (object);
	      }

	    /* Find the maximum alignment we can assume for the object.  */
	    /* ??? Make use of DECL_OFFSET_ALIGN.  */
	    if (DECL_P (object))
	      align = DECL_ALIGN (object);
	    else
	      align = TYPE_ALIGN (type);

	    if (size > 0 && !can_move_by_pieces (size, align))
	      {
		/* Build a static, read-only temporary holding the
		   initializer; the assignment becomes a block copy from
		   it (handled later by gimplify_modify_expr).  */
		tree new;

		if (notify_temp_creation)
		  return GS_ERROR;

		new = create_tmp_var_raw (type, "C");

		gimple_add_tmp_var (new);
		TREE_STATIC (new) = 1;
		TREE_READONLY (new) = 1;
		DECL_INITIAL (new) = ctor;
		if (align > DECL_ALIGN (new))
		  {
		    DECL_ALIGN (new) = align;
		    DECL_USER_ALIGN (new) = 1;
		  }
		walk_tree (&DECL_INITIAL (new), force_labels_r, NULL, NULL);

		GENERIC_TREE_OPERAND (*expr_p, 1) = new;

		/* This is no longer an assignment of a CONSTRUCTOR, but
		   we still may have processing to do on the LHS.  So
		   pretend we didn't do anything here to let that happen.  */
		return GS_UNHANDLED;
	      }
	  }

	/* If the target is volatile and we have non-zero elements
	   initialize the target from a temporary.  */
	if (TREE_THIS_VOLATILE (object)
	    && !TREE_ADDRESSABLE (type)
	    && num_nonzero_elements > 0)
	  {
	    /* Build "(temp = <ctor>, object = temp)" so the volatile
	       object is written once, from the fully-built temporary.  */
	    tree temp = create_tmp_var (TYPE_MAIN_VARIANT (type), NULL);
	    TREE_OPERAND (*expr_p, 0) = temp;
	    *expr_p = build2 (COMPOUND_EXPR, TREE_TYPE (*expr_p),
			      *expr_p,
			      build2 (MODIFY_EXPR, void_type_node,
				      object, temp));
	    return GS_OK;
	  }

	if (notify_temp_creation)
	  return GS_OK;

	/* If there are nonzero elements, pre-evaluate to capture elements
	   overlapping with the lhs into temporaries.  We must do this before
	   clearing to fetch the values before they are zeroed-out.  */
	if (num_nonzero_elements > 0)
	  {
	    preeval_data.lhs_base_decl = get_base_address (object);
	    if (!DECL_P (preeval_data.lhs_base_decl))
	      preeval_data.lhs_base_decl = NULL;
	    preeval_data.lhs_alias_set = get_alias_set (object);

	    gimplify_init_ctor_preeval (&GENERIC_TREE_OPERAND (*expr_p, 1),
					pre_p, post_p, &preeval_data);
	  }

	if (cleared)
	  {
	    /* Zap the CONSTRUCTOR element list, which simplifies this case.
	       Note that we still have to gimplify, in order to handle the
	       case of variable sized types.  Avoid shared tree structures.  */
	    CONSTRUCTOR_ELTS (ctor) = NULL;
	    object = unshare_expr (object);
	    gimplify_stmt (expr_p);
	    append_to_statement_list (*expr_p, pre_p);
	  }

	/* If we have not block cleared the object, or if there are nonzero
	   elements in the constructor, add assignments to the individual
	   scalar fields of the object.  */
	if (!cleared || num_nonzero_elements > 0)
	  gimplify_init_ctor_eval (object, elts, pre_p, cleared);

	*expr_p = NULL_TREE;
      }
      break;

    case COMPLEX_TYPE:
      {
	tree r, i;

	if (notify_temp_creation)
	  return GS_OK;

	/* Extract the real and imaginary parts out of the ctor.  */
	gcc_assert (VEC_length (constructor_elt, elts) == 2);
	r = VEC_index (constructor_elt, elts, 0)->value;
	i = VEC_index (constructor_elt, elts, 1)->value;
	if (r == NULL || i == NULL)
	  {
	    /* Missing parts default to zero of the element type.  */
	    tree zero = fold_convert (TREE_TYPE (type), integer_zero_node);
	    if (r == NULL)
	      r = zero;
	    if (i == NULL)
	      i = zero;
	  }

	/* Complex types have either COMPLEX_CST or COMPLEX_EXPR to
	   represent creation of a complex value.  */
	if (TREE_CONSTANT (r) && TREE_CONSTANT (i))
	  {
	    ctor = build_complex (type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	  }
	else
	  {
	    ctor = build2 (COMPLEX_EXPR, type, r, i);
	    TREE_OPERAND (*expr_p, 1) = ctor;
	    ret = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
				 rhs_predicate_for (TREE_OPERAND (*expr_p, 0)),
				 fb_rvalue);
	  }
      }
      break;

    case VECTOR_TYPE:
      {
	unsigned HOST_WIDE_INT ix;
	constructor_elt *ce;

	if (notify_temp_creation)
	  return GS_OK;

	/* Go ahead and simplify constant constructors to VECTOR_CST.  */
	if (TREE_CONSTANT (ctor))
	  {
	    bool constant_p = true;
	    tree value;

	    /* Even when ctor is constant, it might contain non-*_CST
	       elements, such as addresses or trapping values like
	       1.0/0.0 - 1.0/0.0.  Such expressions don't belong
	       in VECTOR_CST nodes.  */
	    FOR_EACH_CONSTRUCTOR_VALUE (elts, ix, value)
	      if (!CONSTANT_CLASS_P (value))
		{
		  constant_p = false;
		  break;
		}

	    if (constant_p)
	      {
		GENERIC_TREE_OPERAND (*expr_p, 1)
		  = build_vector_from_ctor (type, elts);
		break;
	      }

	    /* Don't reduce an initializer constant even if we can't
	       make a VECTOR_CST.  It won't do anything for us, and it'll
	       prevent us from representing it as a single constant.  */
	    if (initializer_constant_valid_p (ctor, type))
	      break;

	    TREE_CONSTANT (ctor) = 0;
	    TREE_INVARIANT (ctor) = 0;
	  }

	/* Vector types use CONSTRUCTOR all the way through gimple
	   compilation as a general initializer.  */
	for (ix = 0; VEC_iterate (constructor_elt, elts, ix, ce); ix++)
	  {
	    enum gimplify_status tret;
	    tret = gimplify_expr (&ce->value, pre_p, post_p,
				  is_gimple_val, fb_rvalue);
	    if (tret == GS_ERROR)
	      ret = GS_ERROR;
	  }

	/* If the target is a register, it cannot hold a CONSTRUCTOR
	   directly; funnel the value through a formal temporary.  */
	if (!is_gimple_reg (GENERIC_TREE_OPERAND (*expr_p, 0)))
	  GENERIC_TREE_OPERAND (*expr_p, 1) = get_formal_tmp_var (ctor, pre_p);
      }
      break;

    default:
      /* So how did we get a CONSTRUCTOR for a scalar type?  */
      gcc_unreachable ();
    }

  if (ret == GS_ERROR)
    return GS_ERROR;
  else if (want_value)
    {
      /* Emit the initialization to the prequeue and yield the object
	 itself as the expression's value.  */
      if (*expr_p)
	tree_to_gimple_tuple (expr_p);
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = object;
      return GS_OK;
    }
  else
    return GS_ALL_DONE;
}
/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  Note that the resulting type may be different from
   the type pointed to in the sense that it is still compatible
   from the langhooks point of view.  */
tree
gimple_fold_indirect_ref (tree t)
{
  tree deref_type = TREE_TYPE (TREE_TYPE (t));
  tree ptr = t;
  tree ptr_type;

  /* Look through conversions that do not change the semantics of the
     pointer before inspecting it.  */
  STRIP_USELESS_TYPE_CONVERSION (ptr);
  ptr_type = TREE_TYPE (ptr);
  if (!POINTER_TYPE_P (ptr_type))
    return NULL_TREE;

  if (TREE_CODE (ptr) == ADDR_EXPR)
    {
      tree pointee = TREE_OPERAND (ptr, 0);
      tree pointee_type = TREE_TYPE (pointee);

      /* *&p => p */
      if (useless_type_conversion_p (deref_type, pointee_type))
	return pointee;

      /* *(foo *)&fooarray => fooarray[0] */
      if (TREE_CODE (pointee_type) == ARRAY_TYPE
	  && useless_type_conversion_p (deref_type, TREE_TYPE (pointee_type)))
	{
	  tree domain = TYPE_DOMAIN (pointee_type);
	  tree index = size_zero_node;

	  /* Index with the array's lower bound, which need not be zero
	     in every language.  */
	  if (domain && TYPE_MIN_VALUE (domain))
	    index = TYPE_MIN_VALUE (domain);
	  return build4 (ARRAY_REF, deref_type, pointee, index,
			 NULL_TREE, NULL_TREE);
	}
    }

  /* *(foo *)fooarrptr => (*fooarrptr)[0] */
  if (TREE_CODE (TREE_TYPE (ptr_type)) == ARRAY_TYPE
      && useless_type_conversion_p (deref_type,
				    TREE_TYPE (TREE_TYPE (ptr_type))))
    {
      tree domain;
      tree index = size_zero_node;
      tree saved_ptr = ptr;

      /* Try to fold the inner indirection recursively; if nothing
	 simplifies, build the INDIRECT_REF explicitly.  */
      ptr = gimple_fold_indirect_ref (ptr);
      if (! ptr)
	ptr = build1 (INDIRECT_REF, TREE_TYPE (ptr_type), saved_ptr);
      domain = TYPE_DOMAIN (TREE_TYPE (ptr));
      if (domain && TYPE_MIN_VALUE (domain))
	index = TYPE_MIN_VALUE (domain);
      return build4 (ARRAY_REF, deref_type, ptr, index, NULL_TREE, NULL_TREE);
    }

  /* No simplification applies.  */
  return NULL_TREE;
}
/* Given a pointer value OP0, return a simplified version of an
   indirection through OP0, or NULL_TREE if no simplification is
   possible.  This may only be applied to a rhs of an expression.
   Note that the resulting type may be different from the type pointed
   to in the sense that it is still compatible from the langhooks
   point of view.  */
static tree
gimple_fold_indirect_ref_rhs (tree t)
{
  /* RHS position currently imposes no extra restrictions, so simply
     delegate to the generic folder.  */
  tree folded = gimple_fold_indirect_ref (t);
  return folded;
}
/* Subroutine of gimplify_modify_expr to do simplifications of
   MODIFY_EXPRs based on the code of the RHS.  We loop for as long as
   something changes.

   EXPR_P points to the assignment; FROM_P/TO_P point at its RHS and LHS
   operands respectively.  Side effects go on PRE_P/POST_P.  WANT_VALUE
   is true if the caller uses the value of the assignment.

   Returns GS_UNHANDLED when no RHS-specific simplification applies, so
   the caller falls through to generic gimplification; any other status
   terminates the caller's handling of this assignment.  */
static enum gimplify_status
gimplify_modify_expr_rhs (tree *expr_p, tree *from_p, tree *to_p, tree *pre_p,
			  tree *post_p, bool want_value)
{
  enum gimplify_status ret = GS_OK;

  /* Keep re-dispatching on the (possibly rewritten) RHS until a case
     declines by setting GS_UNHANDLED or returns directly.  */
  while (ret != GS_UNHANDLED)
    switch (TREE_CODE (*from_p))
      {
      case VAR_DECL:
	/* If we're assigning from a read-only variable initialized with
	   a constructor, do the direct assignment from the constructor,
	   but only if neither source nor target are volatile since this
	   latter assignment might end up being done on a per-field basis.  */
	if (DECL_INITIAL (*from_p)
	    && TREE_READONLY (*from_p)
	    && !TREE_THIS_VOLATILE (*from_p)
	    && !TREE_THIS_VOLATILE (*to_p)
	    && TREE_CODE (DECL_INITIAL (*from_p)) == CONSTRUCTOR)
	  {
	    tree old_from = *from_p;

	    /* Move the constructor into the RHS.  */
	    *from_p = unshare_expr (DECL_INITIAL (*from_p));

	    /* Let's see if gimplify_init_constructor will need to put
	       it in memory.  If so, revert the change.  (The last
	       argument asks only for notification, without emitting.)  */
	    ret = gimplify_init_constructor (expr_p, NULL, NULL, false, true);
	    if (ret == GS_ERROR)
	      {
		*from_p = old_from;
		/* Fall through.  */
	      }
	    else
	      {
		/* The constructor stays in the RHS; loop again so the
		   CONSTRUCTOR case below processes it.  */
		ret = GS_OK;
		break;
	      }
	  }
	ret = GS_UNHANDLED;
	break;
      case INDIRECT_REF:
	{
	  /* If we have code like
	     *(const A*)(A*)&x
	     where the type of "x" is a (possibly cv-qualified variant
	     of "A"), treat the entire expression as identical to "x".
	     This kind of code arises in C++ when an object is bound
	     to a const reference, and if "x" is a TARGET_EXPR we want
	     to take advantage of the optimization below.  */
	  tree t = gimple_fold_indirect_ref_rhs (TREE_OPERAND (*from_p, 0));
	  if (t)
	    {
	      *from_p = t;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	  break;
	}
      case TARGET_EXPR:
	{
	  /* If we are initializing something from a TARGET_EXPR, strip the
	     TARGET_EXPR and initialize it directly, if possible.  This can't
	     be done if the initializer is void, since that implies that the
	     temporary is set in some non-trivial way.
	     ??? What about code that pulls out the temp and uses it
	     elsewhere? I think that such code never uses the TARGET_EXPR as
	     an initializer.  If I'm wrong, we'll die because the temp won't
	     have any RTL.  In that case, I guess we'll need to replace
	     references somehow.  */
	  tree init = TARGET_EXPR_INITIAL (*from_p);
	  if (!VOID_TYPE_P (TREE_TYPE (init)))
	    {
	      *from_p = init;
	      ret = GS_OK;
	    }
	  else
	    ret = GS_UNHANDLED;
	}
	break;
      case COMPOUND_EXPR:
	/* Remove any COMPOUND_EXPR in the RHS so the following cases will be
	   caught.  */
	gimplify_compound_expr (from_p, pre_p, true);
	ret = GS_OK;
	break;
      case CONSTRUCTOR:
	/* If we're initializing from a CONSTRUCTOR, break this into
	   individual MODIFY_EXPRs.  */
	return gimplify_init_constructor (expr_p, pre_p, post_p, want_value,
					  false);
      case COND_EXPR:
	/* If we're assigning to a non-register type, push the assignment
	   down into the branches.  This is mandatory for ADDRESSABLE types,
	   since we cannot generate temporaries for such, but it saves a
	   copy in other cases as well.  */
	if (!is_gimple_reg_type (TREE_TYPE (*from_p)))
	  {
	    /* This code should mirror the code in gimplify_cond_expr.  */
	    enum tree_code code = TREE_CODE (*expr_p);
	    tree cond = *from_p;
	    tree result = *to_p;

	    ret = gimplify_expr (&result, pre_p, post_p,
				 is_gimple_min_lval, fb_lvalue);
	    if (ret != GS_ERROR)
	      ret = GS_OK;

	    /* Rewrite each non-void arm as "result = arm"; void arms
	       (e.g. throw expressions) are left alone.  */
	    if (TREE_TYPE (TREE_OPERAND (cond, 1)) != void_type_node)
	      TREE_OPERAND (cond, 1)
		= build2 (code, void_type_node, result,
			  TREE_OPERAND (cond, 1));
	    if (TREE_TYPE (TREE_OPERAND (cond, 2)) != void_type_node)
	      TREE_OPERAND (cond, 2)
		= build2 (code, void_type_node, unshare_expr (result),
			  TREE_OPERAND (cond, 2));
	    TREE_TYPE (cond) = void_type_node;
	    recalculate_side_effects (cond);

	    if (want_value)
	      {
		gimplify_and_add (cond, pre_p);
		*expr_p = unshare_expr (result);
	      }
	    else
	      *expr_p = cond;
	    return ret;
	  }
	else
	  ret = GS_UNHANDLED;
	break;
      case CALL_EXPR:
	/* For calls that return in memory, give *to_p as the CALL_EXPR's
	   return slot so that we don't generate a temporary.  */
	if (!CALL_EXPR_RETURN_SLOT_OPT (*from_p)
	    && aggregate_value_p (*from_p, *from_p))
	  {
	    bool use_target;

	    if (!(rhs_predicate_for (*to_p))(*from_p))
	      /* If we need a temporary, *to_p isn't accurate.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == RESULT_DECL
		     && DECL_NAME (*to_p) == NULL_TREE
		     && needs_to_live_in_memory (*to_p))
	      /* It's OK to use the return slot directly unless it's an NRV.  */
	      use_target = true;
	    else if (is_gimple_reg_type (TREE_TYPE (*to_p))
		     || (DECL_P (*to_p) && DECL_REGISTER (*to_p)))
	      /* Don't force regs into memory.  */
	      use_target = false;
	    else if (TREE_CODE (*to_p) == VAR_DECL
		     && DECL_GIMPLE_FORMAL_TEMP_P (*to_p))
	      /* Don't use the original target if it's a formal temp; we
		 don't want to take their addresses.  */
	      use_target = false;
	    else if (TREE_CODE (*expr_p) == INIT_EXPR)
	      /* It's OK to use the target directly if it's being
		 initialized.  */
	      use_target = true;
	    else if (!is_gimple_non_addressable (*to_p))
	      /* Don't use the original target if it's already addressable;
		 if its address escapes, and the called function uses the
		 NRV optimization, a conforming program could see *to_p
		 change before the called function returns; see c++/19317.
		 When optimizing, the return_slot pass marks more functions
		 as safe after we have escape info.  */
	      use_target = false;
	    else
	      use_target = true;

	    if (use_target)
	      {
		CALL_EXPR_RETURN_SLOT_OPT (*from_p) = 1;
		mark_addressable (*to_p);
	      }
	  }
	ret = GS_UNHANDLED;
	break;
	/* If we're initializing from a container, push the initialization
	   inside it.  */
      case CLEANUP_POINT_EXPR:
      case BIND_EXPR:
      case STATEMENT_LIST:
	{
	  tree wrap = *from_p;
	  tree t;

	  ret = gimplify_expr (to_p, pre_p, post_p,
			       is_gimple_min_lval, fb_lvalue);
	  if (ret != GS_ERROR)
	    ret = GS_OK;

	  /* voidify_wrapper_expr pushes the assignment down to the
	     value-producing statement inside the wrapper; it rewrites
	     *expr_p in place.  */
	  t = voidify_wrapper_expr (wrap, *expr_p);
	  gcc_assert (t == *expr_p);

	  if (want_value)
	    {
	      gimplify_and_add (wrap, pre_p);
	      *expr_p = unshare_expr (*to_p);
	    }
	  else
	    *expr_p = wrap;
	  return GS_OK;
	}
      default:
	ret = GS_UNHANDLED;
	break;
      }
  return ret;
}
/* Destructively convert the TREE pointer in TP into a gimple tuple if
   appropriate.  MODIFY_EXPRs become GIMPLE_MODIFY_STMT tuples; anything
   that is already a tuple, or is of any other code, is left untouched.  */
static void
tree_to_gimple_tuple (tree *tp)
{
  switch (TREE_CODE (*tp))
    {
    case GIMPLE_MODIFY_STMT:
      /* Already in tuple form; nothing to do.  */
      return;
    case MODIFY_EXPR:
      {
	struct gimple_stmt *gs;
	tree lhs = TREE_OPERAND (*tp, 0);
	bool def_stmt_self_p = false;

	/* Remember whether this statement is the defining statement of
	   its own LHS SSA name, so the link can be redirected to the
	   replacement tuple below.  */
	if (TREE_CODE (lhs) == SSA_NAME)
	  {
	    if (SSA_NAME_DEF_STMT (lhs) == *tp)
	      def_stmt_self_p = true;
	  }

	/* Build the tuple and copy over the common fields of the old
	   MODIFY_EXPR node.  */
	gs = &make_node (GIMPLE_MODIFY_STMT)->gstmt;
	gs->base = (*tp)->base;
	/* The set to base above overwrites the CODE.  */
	TREE_SET_CODE ((tree) gs, GIMPLE_MODIFY_STMT);
	SET_EXPR_LOCUS ((tree) gs, EXPR_LOCUS (*tp));
	gs->operands[0] = TREE_OPERAND (*tp, 0);
	gs->operands[1] = TREE_OPERAND (*tp, 1);
	gs->block = TREE_BLOCK (*tp);
	*tp = (tree)gs;

	/* If we re-gimplify a set to an SSA_NAME, we must change the
	   SSA name's DEF_STMT link.  */
	if (def_stmt_self_p)
	  SSA_NAME_DEF_STMT (GIMPLE_STMT_OPERAND (*tp, 0)) = *tp;
	return;
      }
    default:
      break;
    }
}
/* Promote partial stores to COMPLEX variables to total stores.  *EXPR_P is
   a MODIFY_EXPR with a lhs of a REAL/IMAGPART_EXPR of a variable with
   DECL_GIMPLE_REG_P set.

   IMPORTANT NOTE: This promotion is performed by introducing a load of the
   other, unmodified part of the complex object just before the total store.
   As a consequence, if the object is still uninitialized, an undefined value
   will be loaded into a register, which may result in a spurious exception
   if the register is floating-point and the value happens to be a signaling
   NaN for example.  Then the fully-fledged complex operations lowering pass
   followed by a DCE pass are necessary in order to fix things up.  */
static enum gimplify_status
gimplify_modify_expr_complex_part (tree *expr_p, tree *pre_p, bool want_value)
{
  enum tree_code code, ocode;
  tree lhs, rhs, new_rhs, other, realpart, imagpart;

  lhs = GENERIC_TREE_OPERAND (*expr_p, 0);
  rhs = GENERIC_TREE_OPERAND (*expr_p, 1);
  /* CODE is REALPART_EXPR or IMAGPART_EXPR; LHS becomes the underlying
     complex variable.  */
  code = TREE_CODE (lhs);
  lhs = TREE_OPERAND (lhs, 0);

  /* Load the part NOT being stored into a temporary.  */
  ocode = code == REALPART_EXPR ? IMAGPART_EXPR : REALPART_EXPR;
  other = build1 (ocode, TREE_TYPE (rhs), lhs);
  other = get_formal_tmp_var (other, pre_p);

  /* Pair the stored value with the preserved part, in the right slots.  */
  realpart = code == REALPART_EXPR ? rhs : other;
  imagpart = code == REALPART_EXPR ? other : rhs;

  if (TREE_CONSTANT (realpart) && TREE_CONSTANT (imagpart))
    new_rhs = build_complex (TREE_TYPE (lhs), realpart, imagpart);
  else
    new_rhs = build2 (COMPLEX_EXPR, TREE_TYPE (lhs), realpart, imagpart);

  /* Rewrite the assignment as a full store of the complex variable.  */
  GENERIC_TREE_OPERAND (*expr_p, 0) = lhs;
  GENERIC_TREE_OPERAND (*expr_p, 1) = new_rhs;

  if (want_value)
    {
      /* Emit the store as a statement; the value of the whole expression
	 is the original RHS (the part that was stored).  */
      tree_to_gimple_tuple (expr_p);
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = rhs;
    }
  return GS_ALL_DONE;
}
/* Gimplify the MODIFY_EXPR node pointed to by EXPR_P.
   modify_expr
   : varname '=' rhs
   | '*' ID '=' rhs
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.
   WANT_VALUE is nonzero iff we want to use the value of this expression
   in another expression.  */
static enum gimplify_status
gimplify_modify_expr (tree *expr_p, tree *pre_p, tree *post_p, bool want_value)
{
  tree *from_p = &GENERIC_TREE_OPERAND (*expr_p, 1);
  tree *to_p = &GENERIC_TREE_OPERAND (*expr_p, 0);
  enum gimplify_status ret = GS_UNHANDLED;

  gcc_assert (TREE_CODE (*expr_p) == MODIFY_EXPR
	      || TREE_CODE (*expr_p) == GIMPLE_MODIFY_STMT
	      || TREE_CODE (*expr_p) == INIT_EXPR);

  /* See if any simplifications can be done based on what the RHS is.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* For zero sized types only gimplify the left hand side and right hand
     side as statements and throw away the assignment.  Do this after
     gimplify_modify_expr_rhs so we handle TARGET_EXPRs of addressable
     types properly.  */
  if (zero_sized_type (TREE_TYPE (*from_p)) && !want_value)
    {
      gimplify_stmt (from_p);
      gimplify_stmt (to_p);
      append_to_statement_list (*from_p, pre_p);
      append_to_statement_list (*to_p, pre_p);
      *expr_p = NULL_TREE;
      return GS_ALL_DONE;
    }

  /* If the value being copied is of variable width, compute the length
     of the copy into a WITH_SIZE_EXPR.  Note that we need to do this
     before gimplifying any of the operands so that we can resolve any
     PLACEHOLDER_EXPRs in the size.  Also note that the RTL expander uses
     the size of the expression to be copied, not of the destination, so
     that is what we must use here.  */
  maybe_with_size_expr (from_p);

  /* Gimplify the LHS first: the RHS predicate may depend on its form.  */
  ret = gimplify_expr (to_p, pre_p, post_p, is_gimple_lvalue, fb_lvalue);
  if (ret == GS_ERROR)
    return ret;

  ret = gimplify_expr (from_p, pre_p, post_p,
		       rhs_predicate_for (*to_p), fb_rvalue);
  if (ret == GS_ERROR)
    return ret;

  /* Now see if the above changed *from_p to something we handle specially.  */
  ret = gimplify_modify_expr_rhs (expr_p, from_p, to_p, pre_p, post_p,
				  want_value);
  if (ret != GS_UNHANDLED)
    return ret;

  /* If we've got a variable sized assignment between two lvalues (i.e. does
     not involve a call), then we can make things a bit more straightforward
     by converting the assignment to memcpy or memset.  */
  if (TREE_CODE (*from_p) == WITH_SIZE_EXPR)
    {
      tree from = TREE_OPERAND (*from_p, 0);
      tree size = TREE_OPERAND (*from_p, 1);

      if (TREE_CODE (from) == CONSTRUCTOR)
	return gimplify_modify_expr_to_memset (expr_p, size, want_value);
      if (is_gimple_addressable (from))
	{
	  *from_p = from;
	  return gimplify_modify_expr_to_memcpy (expr_p, size, want_value);
	}
    }

  /* Transform partial stores to non-addressable complex variables into
     total stores.  This allows us to use real instead of virtual operands
     for these variables, which improves optimization.  */
  if ((TREE_CODE (*to_p) == REALPART_EXPR
       || TREE_CODE (*to_p) == IMAGPART_EXPR)
      && is_gimple_reg (TREE_OPERAND (*to_p, 0)))
    return gimplify_modify_expr_complex_part (expr_p, pre_p, want_value);

  if (gimplify_ctxp->into_ssa && is_gimple_reg (*to_p))
    {
      /* If we've somehow already got an SSA_NAME on the LHS, then
	 we've probably modified it twice.  Not good.  */
      gcc_assert (TREE_CODE (*to_p) != SSA_NAME);
      *to_p = make_ssa_name (*to_p, *expr_p);
    }

  /* Try to alleviate the effects of the gimplification creating artificial
     temporaries (see for example is_gimple_reg_rhs) on the debug info.  */
  if (!gimplify_ctxp->into_ssa
      && DECL_P (*from_p) && DECL_IGNORED_P (*from_p)
      && DECL_P (*to_p) && !DECL_IGNORED_P (*to_p))
    {
      /* Borrow the user variable's name for the artificial temporary and
	 record a debug expression linking the two.  */
      if (!DECL_NAME (*from_p) && DECL_NAME (*to_p))
	DECL_NAME (*from_p)
	  = create_tmp_var_name (IDENTIFIER_POINTER (DECL_NAME (*to_p)));
      DECL_DEBUG_EXPR_IS_FROM (*from_p) = 1;
      SET_DECL_DEBUG_EXPR (*from_p, *to_p);
    }

  if (want_value)
    {
      /* Emit the assignment as a statement and yield the LHS as the
	 expression's value.  */
      tree_to_gimple_tuple (expr_p);
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = *to_p;
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Gimplify an equality/inequality comparison between two variable-sized
   objects by rewriting it as a call to BUILT_IN_MEMCMP compared against
   zero.  */
static enum gimplify_status
gimplify_variable_sized_compare (tree *expr_p)
{
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree size, call, lhs_addr, rhs_addr;

  /* memcmp operates on addresses, so take the address of each operand.  */
  lhs_addr = build_fold_addr_expr (lhs);
  rhs_addr = build_fold_addr_expr (rhs);

  /* The byte count comes from the (possibly self-referential) size of
     the first operand's type; resolve PLACEHOLDER_EXPRs against it.  */
  size = unshare_expr (TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
  size = SUBSTITUTE_PLACEHOLDER_IN_EXPR (size, lhs);

  call = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCMP], 3,
			  lhs_addr, rhs_addr, size);

  /* Keep the original comparison code (EQ/NE) and type, now applied to
     memcmp's result versus zero.  */
  *expr_p = build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
		    call, integer_zero_node);
  return GS_OK;
}
/* Gimplify a comparison between two aggregate objects of integral scalar
   mode as a comparison between the bitwise equivalent scalar values.  */
static enum gimplify_status
gimplify_scalar_mode_aggregate_compare (tree *expr_p)
{
  tree lhs = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);

  /* Get an unsigned integer type with the same machine mode as the
     aggregate, then reinterpret both operands' bits in that type.  */
  tree agg_type = TREE_TYPE (lhs);
  tree int_type = lang_hooks.types.type_for_mode (TYPE_MODE (agg_type), 1);

  tree lhs_bits = fold_build1 (VIEW_CONVERT_EXPR, int_type, lhs);
  tree rhs_bits = fold_build1 (VIEW_CONVERT_EXPR, int_type, rhs);

  /* Rebuild the comparison, preserving its code and result type.  */
  *expr_p = fold_build2 (TREE_CODE (*expr_p), TREE_TYPE (*expr_p),
			 lhs_bits, rhs_bits);
  return GS_OK;
}
/* Gimplify TRUTH_ANDIF_EXPR and TRUTH_ORIF_EXPR expressions.  EXPR_P
   points to the expression to gimplify.

   Expressions of the form 'a && b' are rewritten to:
   a && b ? true : false
   gimplify_cond_expr will do the rest.  */
static enum gimplify_status
gimplify_boolean_expr (tree *expr_p)
{
  tree expr = *expr_p;
  /* Preserve the original type of the expression.  */
  tree type = TREE_TYPE (expr);
  tree true_val = fold_convert (type, boolean_true_node);
  tree false_val = fold_convert (type, boolean_false_node);

  *expr_p = build3 (COND_EXPR, type, expr, true_val, false_val);
  return GS_OK;
}
/* Gimplifies an expression sequence.  This function gimplifies each
   expression and re-writes the original expression with the last
   expression of the sequence in GIMPLE form.

   PRE_P points to the list where the side effects for all the
   expressions in the sequence will be emitted.

   WANT_VALUE is true when the result of the last COMPOUND_EXPR is used.  */
/* ??? Should rearrange to share the pre-queue with all the indirect
   invocations of gimplify_expr.  Would probably save on creations
   of statement_list nodes.  */
static enum gimplify_status
gimplify_compound_expr (tree *expr_p, tree *pre_p, bool want_value)
{
  tree expr = *expr_p;

  /* Walk down the right-hand spine of the COMPOUND_EXPR chain, emitting
     each left-hand operand as a statement on PRE_P.  */
  for (;;)
    {
      tree *first_p = &TREE_OPERAND (expr, 0);

      /* Nested COMPOUND_EXPRs on the left are flattened recursively;
	 anything else is gimplified as a statement.  */
      if (TREE_CODE (*first_p) == COMPOUND_EXPR)
	gimplify_compound_expr (first_p, pre_p, false);
      else
	gimplify_stmt (first_p);
      append_to_statement_list (*first_p, pre_p);

      expr = TREE_OPERAND (expr, 1);
      if (TREE_CODE (expr) != COMPOUND_EXPR)
	break;
    }

  /* Only the final expression of the sequence remains.  */
  *expr_p = expr;

  if (want_value)
    return GS_OK;

  /* Value unused: gimplify the trailing expression as a statement too.  */
  gimplify_stmt (expr_p);
  return GS_ALL_DONE;
}
/* Gimplifies a statement list.  These may be created either by an
   enlightened front-end, or by shortcut_cond_expr.

   Each statement is gimplified in place; empty results are removed and
   nested STATEMENT_LISTs are spliced inline.  If voidify_wrapper_expr
   extracted a value from the list, the list is emitted on PRE_P and the
   value becomes the expression.  */
static enum gimplify_status
gimplify_statement_list (tree *expr_p, tree *pre_p)
{
  tree temp = voidify_wrapper_expr (*expr_p, NULL);
  tree_stmt_iterator i = tsi_start (*expr_p);

  while (!tsi_end_p (i))
    {
      tree t;

      gimplify_stmt (tsi_stmt_ptr (i));

      t = tsi_stmt (i);
      if (t == NULL)
	/* The statement gimplified to nothing; drop it.  */
	tsi_delink (&i);
      else if (TREE_CODE (t) == STATEMENT_LIST)
	{
	  /* Splice a nested list in place of its containing slot.
	     tsi_link_before inserts the sub-list's statements, then the
	     (now redundant) slot is removed.  */
	  tsi_link_before (&i, t, TSI_SAME_STMT);
	  tsi_delink (&i);
	}
      else
	tsi_next (&i);
    }

  if (temp)
    {
      append_to_statement_list (*expr_p, pre_p);
      *expr_p = temp;
      return GS_OK;
    }

  return GS_ALL_DONE;
}
/* Gimplify a SAVE_EXPR node.  EXPR_P points to the expression to
   gimplify.  After gimplification, EXPR_P will point to a new temporary
   that holds the original value of the SAVE_EXPR node.

   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_save_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  enum gimplify_status ret = GS_ALL_DONE;
  tree val;

  gcc_assert (TREE_CODE (*expr_p) == SAVE_EXPR);
  val = TREE_OPERAND (*expr_p, 0);

  /* If the SAVE_EXPR has not been resolved, then evaluate it once.  */
  if (!SAVE_EXPR_RESOLVED_P (*expr_p))
    {
      /* The operand may be a void-valued expression such as SAVE_EXPRs
	 generated by the Java frontend for class initialization.  It is
	 being executed only for its side-effects.  */
      if (TREE_TYPE (val) == void_type_node)
	{
	  ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
			       is_gimple_stmt, fb_none);
	  append_to_statement_list (TREE_OPERAND (*expr_p, 0), pre_p);
	  /* A void SAVE_EXPR has no value to hand back.  */
	  val = NULL;
	}
      else
	val = get_initialized_tmp_var (val, pre_p, post_p);

      /* Mark the SAVE_EXPR resolved so re-gimplification (e.g. when the
	 tree is shared) reuses the temporary instead of re-evaluating.  */
      TREE_OPERAND (*expr_p, 0) = val;
      SAVE_EXPR_RESOLVED_P (*expr_p) = 1;
    }

  *expr_p = val;
  return ret;
}
/* Re-write the ADDR_EXPR node pointed to by EXPR_P
   unary_expr
   : ...
   | '&' varname
   ...
   PRE_P points to the list where side effects that must happen before
   *EXPR_P should be stored.
   POST_P points to the list where side effects that must happen after
   *EXPR_P should be stored.  */
static enum gimplify_status
gimplify_addr_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  tree op0 = TREE_OPERAND (expr, 0);
  enum gimplify_status ret;

  switch (TREE_CODE (op0))
    {
    case INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    do_indirect_ref:
      /* Check if we are dealing with an expression of the form '&*ptr'.
	 While the front end folds away '&*ptr' into 'ptr', these
	 expressions may be generated internally by the compiler (e.g.,
	 builtins like __builtin_va_end).  */
      /* Caution: the silent array decomposition semantics we allow for
	 ADDR_EXPR means we can't always discard the pair.  */
      /* Gimplification of the ADDR_EXPR operand may drop
	 cv-qualification conversions, so make sure we add them if
	 needed.  */
      {
	tree op00 = TREE_OPERAND (op0, 0);
	tree t_expr = TREE_TYPE (expr);
	tree t_op00 = TREE_TYPE (op00);

	if (!useless_type_conversion_p (t_expr, t_op00))
	  op00 = fold_convert (TREE_TYPE (expr), op00);
	*expr_p = op00;
	ret = GS_OK;
      }
      break;
    case VIEW_CONVERT_EXPR:
      /* Take the address of our operand and then convert it to the type of
	 this ADDR_EXPR.
	 ??? The interactions of VIEW_CONVERT_EXPR and aliasing is not at
	 all clear.  The impact of this transformation is even less clear.  */
      /* If the operand is a useless conversion, look through it.  Doing so
	 guarantees that the ADDR_EXPR and its operand will remain of the
	 same type.  */
      if (tree_ssa_useless_type_conversion (TREE_OPERAND (op0, 0)))
	op0 = TREE_OPERAND (op0, 0);
      *expr_p = fold_convert (TREE_TYPE (expr),
			      build_fold_addr_expr (TREE_OPERAND (op0, 0)));
      ret = GS_OK;
      break;
    default:
      /* We use fb_either here because the C frontend sometimes takes
	 the address of a call that returns a struct; see
	 gcc.dg/c99-array-lval-1.c.  The gimplifier will correctly make
	 the implied temporary explicit.  */
      /* Mark the RHS addressable.  */
      ret = gimplify_expr (&TREE_OPERAND (expr, 0), pre_p, post_p,
			   is_gimple_addressable, fb_either);
      if (ret != GS_ERROR)
	{
	  op0 = TREE_OPERAND (expr, 0);
	  /* For various reasons, the gimplification of the expression
	     may have made a new INDIRECT_REF.  */
	  if (TREE_CODE (op0) == INDIRECT_REF)
	    goto do_indirect_ref;
	  /* Make sure TREE_INVARIANT, TREE_CONSTANT, and TREE_SIDE_EFFECTS
	     is set properly.  */
	  recompute_tree_invariant_for_addr_expr (expr);
	  mark_addressable (TREE_OPERAND (expr, 0));
	}
      break;
    }
  return ret;
}
/* Gimplify the operands of an ASM_EXPR.  Input operands should be a gimple
   value; output operands should be a gimple lvalue.

   In/out ("+") outputs are split into a plain output plus a matching
   input so the optimizers see them as separate operands.  Returns
   GS_ERROR if any operand fails to gimplify to the required form.  */
static enum gimplify_status
gimplify_asm_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree expr = *expr_p;
  int noutputs = list_length (ASM_OUTPUTS (expr));
  const char **oconstraints
    = (const char **) alloca ((noutputs) * sizeof (const char *));
  int i;
  tree link;
  const char *constraint;
  bool allows_mem, allows_reg, is_inout;
  enum gimplify_status ret, tret;

  ret = GS_ALL_DONE;

  /* First pass: gimplify the outputs and split any in/out operands.  */
  for (i = 0, link = ASM_OUTPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      size_t constraint_len;
      oconstraints[i] = constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      constraint_len = strlen (constraint);
      if (constraint_len == 0)
	continue;

      parse_output_constraint (&constraint, i, 0, 0,
			       &allows_mem, &allows_reg, &is_inout);

      /* An output that must live in memory needs its address taken.  */
      if (!allows_reg && allows_mem)
	mark_addressable (TREE_VALUE (link));

      tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
			    is_inout ? is_gimple_min_lval : is_gimple_lvalue,
			    fb_lvalue | fb_mayfail);
      if (tret == GS_ERROR)
	{
	  error ("invalid lvalue in asm output %d", i);
	  ret = tret;
	}

      if (is_inout)
	{
	  /* An input/output operand.  To give the optimizers more
	     flexibility, split it into separate input and output
	     operands.  */
	  tree input;
	  char buf[10];

	  /* Turn the in/out constraint into an output constraint.  */
	  char *p = xstrdup (constraint);
	  p[0] = '=';
	  TREE_VALUE (TREE_PURPOSE (link)) = build_string (constraint_len, p);

	  /* And add a matching input constraint.  */
	  if (allows_reg)
	    {
	      sprintf (buf, "%d", i);
	      /* If there are multiple alternatives in the constraint,
		 handle each of them individually.  Those that allow register
		 will be replaced with operand number, the others will stay
		 unchanged.  */
	      if (strchr (p, ',') != NULL)
		{
		  size_t len = 0, buflen = strlen (buf);
		  char *beg, *end, *str, *dst;

		  /* First pass over the alternatives: compute the length
		     of the rewritten constraint string.  */
		  for (beg = p + 1;;)
		    {
		      end = strchr (beg, ',');
		      if (end == NULL)
			end = strchr (beg, '\0');
		      if ((size_t) (end - beg) < buflen)
			len += buflen + 1;
		      else
			len += end - beg + 1;
		      if (*end)
			beg = end + 1;
		      else
			break;
		    }

		  str = (char *) alloca (len);

		  /* Second pass: emit the operand number for alternatives
		     that allow a register, copy the rest verbatim.  */
		  for (beg = p + 1, dst = str;;)
		    {
		      const char *tem;
		      bool mem_p, reg_p, inout_p;

		      end = strchr (beg, ',');
		      if (end)
			*end = '\0';
		      /* Temporarily prefix this alternative with '=' so it
			 parses as a standalone output constraint.  */
		      beg[-1] = '=';
		      tem = beg - 1;
		      parse_output_constraint (&tem, i, 0, 0,
					       &mem_p, &reg_p, &inout_p);
		      if (dst != str)
			*dst++ = ',';
		      if (reg_p)
			{
			  memcpy (dst, buf, buflen);
			  dst += buflen;
			}
		      else
			{
			  if (end)
			    len = end - beg;
			  else
			    len = strlen (beg);
			  memcpy (dst, beg, len);
			  dst += len;
			}
		      if (end)
			beg = end + 1;
		      else
			break;
		    }
		  *dst = '\0';
		  input = build_string (dst - str, str);
		}
	      else
		input = build_string (strlen (buf), buf);
	    }
	  else
	    /* No register allowed: reuse the original constraint body
	       (skipping the leading '+').  */
	    input = build_string (constraint_len - 1, constraint + 1);
	  free (p);
	  input = build_tree_list (build_tree_list (NULL_TREE, input),
				   unshare_expr (TREE_VALUE (link)));
	  ASM_INPUTS (expr) = chainon (ASM_INPUTS (expr), input);
	}
    }

  /* Second pass: gimplify the inputs.  Note that I deliberately keeps
     counting from the outputs so diagnostics number operands the way
     users wrote them.  */
  for (link = ASM_INPUTS (expr); link; ++i, link = TREE_CHAIN (link))
    {
      constraint
	= TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (link)));
      parse_input_constraint (&constraint, 0, 0, noutputs, 0,
			      oconstraints, &allows_mem, &allows_reg);

      /* If we can't make copies, we can only accept memory.  */
      if (TREE_ADDRESSABLE (TREE_TYPE (TREE_VALUE (link))))
	{
	  if (allows_mem)
	    allows_reg = 0;
	  else
	    {
	      error ("impossible constraint in %<asm%>");
	      error ("non-memory input %d must stay in memory", i);
	      return GS_ERROR;
	    }
	}

      /* If the operand is a memory input, it should be an lvalue.  */
      if (!allows_reg && allows_mem)
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_lvalue, fb_lvalue | fb_mayfail);
	  mark_addressable (TREE_VALUE (link));
	  if (tret == GS_ERROR)
	    {
	      error ("memory input %d is not directly addressable", i);
	      ret = tret;
	    }
	}
      else
	{
	  tret = gimplify_expr (&TREE_VALUE (link), pre_p, post_p,
				is_gimple_asm_val, fb_rvalue);
	  if (tret == GS_ERROR)
	    ret = tret;
	}
    }

  return ret;
}
/* Gimplify a CLEANUP_POINT_EXPR.  Currently this works by adding
   WITH_CLEANUP_EXPRs to the prequeue as we encounter cleanups while
   gimplifying the body, and converting them to TRY_FINALLY_EXPRs when we
   return to this function.

   FIXME should we complexify the prequeue handling instead?  Or use flags
   for all the cleanups and let the optimizer tighten them up?  The current
   code seems pretty fragile; it will break on a cleanup within any
   non-conditional nesting.  But any such nesting would be broken, anyway;
   we can't write a TRY_FINALLY_EXPR that starts inside a nesting construct
   and continues out of it.  We can do that at the RTL level, though, so
   having an optimizer to tighten up try/finally regions would be a Good
   Thing.  */
static enum gimplify_status
gimplify_cleanup_point_expr (tree *expr_p, tree *pre_p)
{
  tree_stmt_iterator iter;
  tree body;
  tree temp = voidify_wrapper_expr (*expr_p, NULL);

  /* We only care about the number of conditions between the innermost
     CLEANUP_POINT_EXPR and the cleanup.  So save and reset the count and
     any cleanups collected outside the CLEANUP_POINT_EXPR.  */
  int old_conds = gimplify_ctxp->conditions;
  tree old_cleanups = gimplify_ctxp->conditional_cleanups;
  gimplify_ctxp->conditions = 0;
  gimplify_ctxp->conditional_cleanups = NULL_TREE;

  body = TREE_OPERAND (*expr_p, 0);
  gimplify_to_stmt_list (&body);

  gimplify_ctxp->conditions = old_conds;
  gimplify_ctxp->conditional_cleanups = old_cleanups;

  /* Walk the gimplified body looking for the WITH_CLEANUP_EXPR markers
     inserted by gimple_push_cleanup, and wrap everything that follows
     each marker in a try/finally (or try/catch) with the cleanup.  */
  for (iter = tsi_start (body); !tsi_end_p (iter); )
    {
      tree *wce_p = tsi_stmt_ptr (iter);
      tree wce = *wce_p;

      if (TREE_CODE (wce) == WITH_CLEANUP_EXPR)
	{
	  if (tsi_one_before_end_p (iter))
	    {
	      /* The cleanup is the last statement: nothing can throw
		 after it, so just emit the cleanup action inline.  */
	      tsi_link_before (&iter, TREE_OPERAND (wce, 0), TSI_SAME_STMT);
	      tsi_delink (&iter);
	      break;
	    }
	  else
	    {
	      tree sl, tfe;
	      enum tree_code code;

	      /* CLEANUP_EH_ONLY cleanups run only on the exception path.  */
	      if (CLEANUP_EH_ONLY (wce))
		code = TRY_CATCH_EXPR;
	      else
		code = TRY_FINALLY_EXPR;

	      /* Protect every statement after the marker with the
		 cleanup, then continue scanning inside the new body for
		 further markers.  */
	      sl = tsi_split_statement_list_after (&iter);
	      tfe = build2 (code, void_type_node, sl, NULL_TREE);
	      append_to_statement_list (TREE_OPERAND (wce, 0),
					&TREE_OPERAND (tfe, 1));
	      *wce_p = tfe;
	      iter = tsi_start (sl);
	    }
	}
      else
	tsi_next (&iter);
    }

  if (temp)
    {
      *expr_p = temp;
      append_to_statement_list (body, pre_p);
      return GS_OK;
    }
  else
    {
      *expr_p = body;
      return GS_ALL_DONE;
    }
}
/* Insert a cleanup marker for gimplify_cleanup_point_expr.  CLEANUP
   is the cleanup action required.  VAR is the temporary protected by
   the cleanup; EH_ONLY means the cleanup runs only on the exception
   path.  The WITH_CLEANUP_EXPR marker is appended to PRE_P (or to the
   enclosing conditional context's cleanup list).  */
static void
gimple_push_cleanup (tree var, tree cleanup, bool eh_only, tree *pre_p)
{
  tree wce;

  /* Errors can result in improperly nested cleanups.  Which results in
     confusion when trying to resolve the WITH_CLEANUP_EXPR.  */
  if (errorcount || sorrycount)
    return;

  if (gimple_conditional_context ())
    {
      /* If we're in a conditional context, this is more complex.  We only
	 want to run the cleanup if we actually ran the initialization that
	 necessitates it, but we want to run it after the end of the
	 conditional context.  So we wrap the try/finally around the
	 condition and use a flag to determine whether or not to actually
	 run the destructor.  Thus

	   test ? f(A()) : 0

	 becomes (approximately)

	   flag = 0;
	   try {
	     if (test) { A::A(temp); flag = 1; val = f(temp); }
	     else { val = 0; }
	   } finally {
	     if (flag) A::~A(temp);
	   }
	   val
      */
      tree flag = create_tmp_var (boolean_type_node, "cleanup");
      tree ffalse = build_gimple_modify_stmt (flag, boolean_false_node);
      tree ftrue = build_gimple_modify_stmt (flag, boolean_true_node);

      /* Guard the cleanup with the flag...  */
      cleanup = build3 (COND_EXPR, void_type_node, flag, cleanup, NULL);
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      /* ... clear the flag (and register the cleanup) outside the
	 conditional, and set it at the point of initialization.  */
      append_to_statement_list (ffalse, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (wce, &gimplify_ctxp->conditional_cleanups);
      append_to_statement_list (ftrue, pre_p);

      /* Because of this manipulation, and the EH edges that jump
	 threading cannot redirect, the temporary (VAR) will appear
	 to be used uninitialized.  Don't warn.  */
      TREE_NO_WARNING (var) = 1;
    }
  else
    {
      wce = build1 (WITH_CLEANUP_EXPR, void_type_node, cleanup);
      CLEANUP_EH_ONLY (wce) = eh_only;
      append_to_statement_list (wce, pre_p);
    }

  /* Gimplify the cleanup action carried by the marker.  */
  gimplify_stmt (&TREE_OPERAND (wce, 0));
}
/* Gimplify a TARGET_EXPR which doesn't appear on the rhs of an INIT_EXPR.
   The TARGET_EXPR's temporary slot is initialized (once) on PRE_P and the
   expression is replaced by the slot itself.  */
static enum gimplify_status
gimplify_target_expr (tree *expr_p, tree *pre_p, tree *post_p)
{
  tree targ = *expr_p;
  tree temp = TARGET_EXPR_SLOT (targ);
  tree init = TARGET_EXPR_INITIAL (targ);
  enum gimplify_status ret;

  if (init)
    {
      /* TARGET_EXPR temps aren't part of the enclosing block, so add it
	 to the temps list.  Handle also variable length TARGET_EXPRs.  */
      if (TREE_CODE (DECL_SIZE (temp)) != INTEGER_CST)
	{
	  if (!TYPE_SIZES_GIMPLIFIED (TREE_TYPE (temp)))
	    gimplify_type_sizes (TREE_TYPE (temp), pre_p);
	  gimplify_vla_decl (temp, pre_p);
	}
      else
	gimple_add_tmp_var (temp);

      /* If TARGET_EXPR_INITIAL is void, then the mere evaluation of the
	 expression is supposed to initialize the slot.  */
      if (VOID_TYPE_P (TREE_TYPE (init)))
	ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt, fb_none);
      else
	{
	  /* Otherwise initialize the slot explicitly.  */
	  init = build2 (INIT_EXPR, void_type_node, temp, init);
	  ret = gimplify_expr (&init, pre_p, post_p, is_gimple_stmt,
			       fb_none);
	}
      if (ret == GS_ERROR)
	{
	  /* PR c++/28266 Make sure this is expanded only once. */
	  TARGET_EXPR_INITIAL (targ) = NULL_TREE;
	  return GS_ERROR;
	}
      append_to_statement_list (init, pre_p);

      /* If needed, push the cleanup for the temp.  */
      if (TARGET_EXPR_CLEANUP (targ))
	{
	  gimplify_stmt (&TARGET_EXPR_CLEANUP (targ));
	  gimple_push_cleanup (temp, TARGET_EXPR_CLEANUP (targ),
			       CLEANUP_EH_ONLY (targ), pre_p);
	}

      /* Only expand this once.  */
      TREE_OPERAND (targ, 3) = init;
      TARGET_EXPR_INITIAL (targ) = NULL_TREE;
    }
  else
    /* We should have expanded this before.  */
    gcc_assert (DECL_SEEN_IN_BIND_EXPR_P (temp));

  *expr_p = temp;
  return GS_OK;
}
/* Gimplification of expression trees. */
/* Gimplify an expression which appears at statement context; usually, this
   means replacing it with a suitably gimple STATEMENT_LIST.  The
   expression is gimplified in place, with no separate pre/post queues
   and no result value required (fb_none).  */
void
gimplify_stmt (tree *stmt_p)
{
  gimplify_expr (stmt_p, NULL, NULL, is_gimple_stmt, fb_none);
}
/* Like gimplify_stmt, but additionally guarantee that after the call
   *STMT_P is a STATEMENT_LIST (possibly empty).  */
void
gimplify_to_stmt_list (tree *stmt_p)
{
  gimplify_stmt (stmt_p);

  if (*stmt_p == NULL_TREE)
    {
      /* Nothing was produced; substitute an empty list.  */
      *stmt_p = alloc_stmt_list ();
      return;
    }

  if (TREE_CODE (*stmt_p) == STATEMENT_LIST)
    return;

  /* A single statement remains: wrap it in a fresh list.  */
  {
    tree single = *stmt_p;
    *stmt_p = alloc_stmt_list ();
    append_to_statement_list (single, stmt_p);
  }
}
/* Add FIRSTPRIVATE entries for DECL in the surrounding OpenMP parallels
   up the context chain.  If entries already exist, force them to be some
   flavor of private: a SHARED entry is demoted to FIRSTPRIVATE (keeping
   its SEEN bit), and any other existing entry ends the walk.  If there
   is no enclosing parallel, do nothing.  */
void
omp_firstprivatize_variable (struct gimplify_omp_ctx *ctx, tree decl)
{
  if (decl == NULL || !DECL_P (decl))
    return;

  while (1)
    {
      splay_tree_node n
	= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	{
	  if ((n->value & GOVD_SHARED) == 0)
	    /* Already privatized in some way; stop here.  */
	    return;
	  n->value = GOVD_FIRSTPRIVATE | (n->value & GOVD_SEEN);
	}
      else if (ctx->is_parallel)
	omp_add_variable (ctx, decl, GOVD_FIRSTPRIVATE);

      ctx = ctx->outer_context;
      if (ctx == NULL)
	return;
    }
}
/* Similarly for each of the type sizes of TYPE: firstprivatize every
   size expression reachable from it (bounds, array domains, field
   offsets, pointed-to types), then TYPE_SIZE / TYPE_SIZE_UNIT, and
   finally whatever the language hook adds.  */
static void
omp_firstprivatize_type_sizes (struct gimplify_omp_ctx *ctx, tree type)
{
  enum tree_code code;

  if (type == NULL || type == error_mark_node)
    return;
  type = TYPE_MAIN_VARIANT (type);

  /* Process each main-variant type at most once per context.  */
  if (pointer_set_insert (ctx->privatized_types, type))
    return;

  code = TREE_CODE (type);
  if (code == INTEGER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
      || code == REAL_TYPE || code == FIXED_POINT_TYPE)
    {
      /* Scalar types: privatize the bound expressions.  */
      omp_firstprivatize_variable (ctx, TYPE_MIN_VALUE (type));
      omp_firstprivatize_variable (ctx, TYPE_MAX_VALUE (type));
    }
  else if (code == ARRAY_TYPE)
    {
      /* Recurse on the element type and the index domain.  */
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));
      omp_firstprivatize_type_sizes (ctx, TYPE_DOMAIN (type));
    }
  else if (code == RECORD_TYPE || code == UNION_TYPE
	   || code == QUAL_UNION_TYPE)
    {
      /* Aggregates: walk each field's offset and type.  */
      tree field;
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
	if (TREE_CODE (field) == FIELD_DECL)
	  {
	    omp_firstprivatize_variable (ctx, DECL_FIELD_OFFSET (field));
	    omp_firstprivatize_type_sizes (ctx, TREE_TYPE (field));
	  }
    }
  else if (code == POINTER_TYPE || code == REFERENCE_TYPE)
    omp_firstprivatize_type_sizes (ctx, TREE_TYPE (type));

  omp_firstprivatize_variable (ctx, TYPE_SIZE (type));
  omp_firstprivatize_variable (ctx, TYPE_SIZE_UNIT (type));
  lang_hooks.types.omp_firstprivatize_type_sizes (ctx, type);
}
/* Add an entry for DECL in the OpenMP context CTX with FLAGS, a mask of
   GOVD_* bits describing its data-sharing class.  Variable-sized DECLs
   additionally have their pointer replacement variable recorded and
   their size/type parameters made FIRSTPRIVATE; by-reference
   privatizations get their pointed-to size noticed as well.  */
static void
omp_add_variable (struct gimplify_omp_ctx *ctx, tree decl, unsigned int flags)
{
  splay_tree_node n;
  unsigned int nflags;
  tree t;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return;

  /* Never elide decls whose type has TREE_ADDRESSABLE set.  This means
     there are constructors involved somewhere.  */
  if (TREE_ADDRESSABLE (TREE_TYPE (decl))
      || TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl)))
    flags |= GOVD_SEEN;

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n != NULL)
    {
      /* We shouldn't be re-adding the decl with the same data
	 sharing class.  */
      gcc_assert ((n->value & GOVD_DATA_SHARE_CLASS & flags) == 0);
      /* The only combination of data sharing classes we should see is
	 FIRSTPRIVATE and LASTPRIVATE.  */
      nflags = n->value | flags;
      gcc_assert ((nflags & GOVD_DATA_SHARE_CLASS)
		  == (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE));
      n->value = nflags;
      return;
    }

  /* When adding a variable-sized variable, we have to handle all sorts
     of additional bits of data: the pointer replacement variable, and
     the parameters of the type.  */
  if (DECL_SIZE (decl) && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    {
      /* Add the pointer replacement variable as PRIVATE if the variable
	 replacement is private, else FIRSTPRIVATE since we'll need the
	 address of the original variable either for SHARED, or for the
	 copy into or out of the context.  */
      if (!(flags & GOVD_LOCAL))
	{
	  nflags = flags & GOVD_PRIVATE ? GOVD_PRIVATE : GOVD_FIRSTPRIVATE;
	  nflags |= flags & GOVD_SEEN;
	  /* The replacement is expected to be *ptr; register PTR.  */
	  t = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (t) == INDIRECT_REF);
	  t = TREE_OPERAND (t, 0);
	  gcc_assert (DECL_P (t));
	  omp_add_variable (ctx, t, nflags);
	}

      /* Add all of the variable and type parameters (which should have
	 been gimplified to a formal temporary) as FIRSTPRIVATE.  */
      omp_firstprivatize_variable (ctx, DECL_SIZE_UNIT (decl));
      omp_firstprivatize_variable (ctx, DECL_SIZE (decl));
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* The variable-sized variable itself is never SHARED, only some form
	 of PRIVATE.  The sharing would take place via the pointer variable
	 which we remapped above.  */
      if (flags & GOVD_SHARED)
	flags = GOVD_PRIVATE | GOVD_DEBUG_PRIVATE
		| (flags & (GOVD_SEEN | GOVD_EXPLICIT));

      /* We're going to make use of the TYPE_SIZE_UNIT at least in the
	 alloca statement we generate for the variable, so make sure it
	 is available.  This isn't automatically needed for the SHARED
	 case, since we won't be allocating local storage then.
	 For local variables TYPE_SIZE_UNIT might not be gimplified yet,
	 in this case omp_notice_variable will be called later
	 on when it is gimplified.  */
      else if (! (flags & GOVD_LOCAL))
	omp_notice_variable (ctx, TYPE_SIZE_UNIT (TREE_TYPE (decl)), true);
    }
  else if (lang_hooks.decls.omp_privatize_by_reference (decl))
    {
      gcc_assert ((flags & GOVD_LOCAL) == 0);
      omp_firstprivatize_type_sizes (ctx, TREE_TYPE (decl));

      /* Similar to the direct variable sized case above, we'll need the
	 size of references being privatized.  */
      if ((flags & GOVD_SHARED) == 0)
	{
	  t = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
	  if (TREE_CODE (t) != INTEGER_CST)
	    omp_notice_variable (ctx, t, true);
	}
    }

  splay_tree_insert (ctx->variables, (splay_tree_key)decl, flags);
}
/* Record the fact that DECL was used within the OpenMP context CTX.
   IN_CODE is true when real code uses DECL, and false when we should
   merely emit default(none) errors.  Return true if DECL is going to
   be remapped and thus DECL shouldn't be gimplified into its
   DECL_VALUE_EXPR (if any).  The use is also propagated recursively
   to enclosing contexts unless DECL is private here.  */
static bool
omp_notice_variable (struct gimplify_omp_ctx *ctx, tree decl, bool in_code)
{
  splay_tree_node n;
  unsigned flags = in_code ? GOVD_SEEN : 0;
  bool ret = false, shared;

  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
    return false;

  /* Threadprivate variables are predetermined.  */
  if (is_global_var (decl))
    {
      if (DECL_THREAD_LOCAL_P (decl))
	return false;
      /* A variable whose value expression is based on a thread-local
	 decl is treated the same way.  */
      if (DECL_HAS_VALUE_EXPR_P (decl))
	{
	  tree value = get_base_address (DECL_VALUE_EXPR (decl));
	  if (value && DECL_P (value) && DECL_THREAD_LOCAL_P (value))
	    return false;
	}
    }

  n = splay_tree_lookup (ctx->variables, (splay_tree_key)decl);
  if (n == NULL)
    {
      enum omp_clause_default_kind default_kind, kind;

      /* Implicit mappings are only created at a parallel context;
	 otherwise just propagate to the outer context.  */
      if (!ctx->is_parallel)
	goto do_outer;

      /* ??? Some compiler-generated variables (like SAVE_EXPRs) could be
	 remapped firstprivate instead of shared.  To some extent this is
	 addressed in omp_firstprivatize_type_sizes, but not effectively.  */
      default_kind = ctx->default_kind;
      kind = lang_hooks.decls.omp_predetermined_sharing (decl);
      if (kind != OMP_CLAUSE_DEFAULT_UNSPECIFIED)
	default_kind = kind;

      switch (default_kind)
	{
	case OMP_CLAUSE_DEFAULT_NONE:
	  error ("%qs not specified in enclosing parallel",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  error ("%Henclosing parallel", &ctx->location);
	  /* FALLTHRU */
	case OMP_CLAUSE_DEFAULT_SHARED:
	  flags |= GOVD_SHARED;
	  break;
	case OMP_CLAUSE_DEFAULT_PRIVATE:
	  flags |= GOVD_PRIVATE;
	  break;
	default:
	  gcc_unreachable ();
	}

      omp_add_variable (ctx, decl, flags);

      shared = (flags & GOVD_SHARED) != 0;
      ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);
      goto do_outer;
    }

  shared = ((flags | n->value) & GOVD_SHARED) != 0;
  ret = lang_hooks.decls.omp_disregard_value_expr (decl, shared);

  /* If nothing changed, there's nothing left to do.  */
  if ((n->value & flags) == flags)
    return ret;
  flags |= n->value;
  n->value = flags;

 do_outer:
  /* If the variable is private in the current context, then we don't
     need to propagate anything to an outer context.  */
  if (flags & GOVD_PRIVATE)
    return ret;
  if (ctx->outer_context
      && omp_notice_variable (ctx->outer_context, decl, in_code))
    return true;
  return ret;
}
/* Verify that DECL is private within CTX.  If there's specific information
   to the contrary in the innermost scope, generate an error.  */
static bool
omp_is_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  /* Walk outward through the context chain until DECL is found or the
     chain is exhausted.  */
  for (;;)
    {
      splay_tree_node n
	= splay_tree_lookup (ctx->variables, (splay_tree_key)decl);

      if (n == NULL)
	{
	  /* Unknown here: a parallel boundary stops the search; with
	     no outer context, globals are not private.  */
	  if (ctx->is_parallel)
	    return false;
	  if (ctx->outer_context == NULL)
	    return !is_global_var (decl);
	  ctx = ctx->outer_context;
	  continue;
	}

      if (n->value & GOVD_SHARED)
	{
	  if (ctx != gimplify_omp_ctxp)
	    return false;
	  /* Diagnose and force the entry private in the innermost
	     scope.  */
	  error ("iteration variable %qs should be private",
		 IDENTIFIER_POINTER (DECL_NAME (decl)));
	  n->value = GOVD_PRIVATE;
	  return true;
	}

      if ((n->value & GOVD_EXPLICIT) != 0
	  && (ctx == gimplify_omp_ctxp
	      || (ctx->is_combined_parallel
		  && gimplify_omp_ctxp->outer_context == ctx)))
	{
	  if ((n->value & GOVD_FIRSTPRIVATE) != 0)
	    error ("iteration variable %qs should not be firstprivate",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	  else if ((n->value & GOVD_REDUCTION) != 0)
	    error ("iteration variable %qs should not be reduction",
		   IDENTIFIER_POINTER (DECL_NAME (decl)));
	}

      return true;
    }
}
/* Return true if DECL is private within a parallel region
   that binds to the current construct's context or in parallel
   region's REDUCTION clause.  */
static bool
omp_check_private (struct gimplify_omp_ctx *ctx, tree decl)
{
  /* Start from the construct's enclosing context and walk outward.  */
  while (1)
    {
      splay_tree_node n;

      ctx = ctx->outer_context;
      if (ctx == NULL)
	/* No enclosing region: private iff not a global and not a
	   by-reference privatization (references might be private,
	   but might be shared too).  */
	return !(is_global_var (decl)
		 || lang_hooks.decls.omp_privatize_by_reference (decl));

      n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
      if (n != NULL)
	return (n->value & GOVD_SHARED) == 0;

      /* Do not look past an enclosing parallel.  */
      if (ctx->is_parallel)
	return false;
    }
}
/* Scan the OpenMP clauses in *LIST_P, installing mappings into a new
   and previous omp contexts.  IN_PARALLEL and IN_COMBINED_PARALLEL
   describe the construct being scanned; PRE_P receives statements
   produced while gimplifying clause operands.  Erroneous clauses are
   unlinked from the list.  On exit gimplify_omp_ctxp points at the
   newly created context.  */
static void
gimplify_scan_omp_clauses (tree *list_p, tree *pre_p, bool in_parallel,
			   bool in_combined_parallel)
{
  struct gimplify_omp_ctx *ctx, *outer_ctx;
  tree c;

  ctx = new_omp_context (in_parallel, in_combined_parallel);
  outer_ctx = ctx->outer_context;

  while ((c = *list_p) != NULL)
    {
      enum gimplify_status gs;
      bool remove = false;
      bool notice_outer = true;
      const char *check_non_private = NULL;
      unsigned int flags;
      tree decl;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  flags = GOVD_PRIVATE | GOVD_EXPLICIT;
	  /* A PRIVATE decl's outer value is not used, so don't notice
	     it in the enclosing context.  */
	  notice_outer = false;
	  goto do_add;
	case OMP_CLAUSE_SHARED:
	  flags = GOVD_SHARED | GOVD_EXPLICIT;
	  goto do_add;
	case OMP_CLAUSE_FIRSTPRIVATE:
	  flags = GOVD_FIRSTPRIVATE | GOVD_EXPLICIT;
	  check_non_private = "firstprivate";
	  goto do_add;
	case OMP_CLAUSE_LASTPRIVATE:
	  flags = GOVD_LASTPRIVATE | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "lastprivate";
	  goto do_add;
	case OMP_CLAUSE_REDUCTION:
	  flags = GOVD_REDUCTION | GOVD_SEEN | GOVD_EXPLICIT;
	  check_non_private = "reduction";
	  goto do_add;

	do_add:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	  omp_add_variable (ctx, decl, flags);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* Gimplify the reduction init and merge sequences inside
		 the new omp context, each in its own gimplify context
		 so their temporaries stay separate.  */
	      omp_add_variable (ctx, OMP_CLAUSE_REDUCTION_PLACEHOLDER (c),
				GOVD_LOCAL | GOVD_SEEN);
	      gimplify_omp_ctxp = ctx;
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_INIT (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_INIT (c));
	      push_gimplify_context ();
	      gimplify_stmt (&OMP_CLAUSE_REDUCTION_MERGE (c));
	      pop_gimplify_context (OMP_CLAUSE_REDUCTION_MERGE (c));
	      gimplify_omp_ctxp = outer_ctx;
	    }
	  if (notice_outer)
	    goto do_notice;
	  break;

	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (decl == error_mark_node || TREE_TYPE (decl) == error_mark_node)
	    {
	      remove = true;
	      break;
	    }
	do_notice:
	  /* Record the use in the enclosing context and diagnose
	     first/last/reduction decls that are private out there.  */
	  if (outer_ctx)
	    omp_notice_variable (outer_ctx, decl, true);
	  if (check_non_private
	      && !in_parallel
	      && omp_check_private (ctx, decl))
	    {
	      error ("%s variable %qs is private in outer context",
		     check_non_private, IDENTIFIER_POINTER (DECL_NAME (decl)));
	      remove = true;
	    }
	  break;

	case OMP_CLAUSE_IF:
	  OMP_CLAUSE_OPERAND (c, 0)
	    = gimple_boolify (OMP_CLAUSE_OPERAND (c, 0));
	  /* Fall through.  */

	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NUM_THREADS:
	  /* Gimplify the single clause operand into PRE_P.  */
	  gs = gimplify_expr (&OMP_CLAUSE_OPERAND (c, 0), pre_p, NULL,
			      is_gimple_val, fb_rvalue);
	  if (gs == GS_ERROR)
	    remove = true;
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	  break;

	case OMP_CLAUSE_DEFAULT:
	  ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  gimplify_omp_ctxp = ctx;
}
/* splay_tree_foreach callback used by gimplify_adjust_omp_clauses: for
   each variable N recorded in the current omp context, prepend an
   implicit data-sharing clause to the list pointed to by DATA.  No
   clause is produced for variables with an explicit clause
   (GOVD_EXPLICIT), context-local ones (GOVD_LOCAL), or ones never
   actually seen in the body (no GOVD_SEEN).  Always returns 0 so the
   traversal continues.  */
static int
gimplify_adjust_omp_clauses_1 (splay_tree_node n, void *data)
{
  tree *list_p = (tree *) data;
  tree decl = (tree) n->key;
  unsigned flags = n->value;
  enum omp_clause_code code;
  tree clause;
  bool private_debug;

  if (flags & (GOVD_EXPLICIT | GOVD_LOCAL))
    return 0;
  if ((flags & GOVD_SEEN) == 0)
    return 0;
  if (flags & GOVD_DEBUG_PRIVATE)
    {
      gcc_assert ((flags & GOVD_DATA_SHARE_CLASS) == GOVD_PRIVATE);
      private_debug = true;
    }
  else
    private_debug
      = lang_hooks.decls.omp_private_debug_clause (decl,
						   !!(flags & GOVD_SHARED));
  if (private_debug)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_SHARED)
    {
      if (is_global_var (decl))
	{
	  /* A global only needs an explicit SHARED clause if some
	     enclosing context privatizes it; otherwise emit nothing.  */
	  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp->outer_context;
	  while (ctx != NULL)
	    {
	      splay_tree_node on
		= splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	      if (on && (on->value & (GOVD_FIRSTPRIVATE | GOVD_LASTPRIVATE
				      | GOVD_PRIVATE | GOVD_REDUCTION)) != 0)
		break;
	      ctx = ctx->outer_context;
	    }
	  if (ctx == NULL)
	    return 0;
	}
      code = OMP_CLAUSE_SHARED;
    }
  else if (flags & GOVD_PRIVATE)
    code = OMP_CLAUSE_PRIVATE;
  else if (flags & GOVD_FIRSTPRIVATE)
    code = OMP_CLAUSE_FIRSTPRIVATE;
  else
    gcc_unreachable ();

  /* Build the implicit clause and prepend it to the list.  */
  clause = build_omp_clause (code);
  OMP_CLAUSE_DECL (clause) = decl;
  OMP_CLAUSE_CHAIN (clause) = *list_p;
  if (private_debug)
    OMP_CLAUSE_PRIVATE_DEBUG (clause) = 1;
  *list_p = clause;

  return 0;
}
/* After the construct's body has been gimplified, adjust the clause
   list in *LIST_P: drop explicit PRIVATE/SHARED/FIRSTPRIVATE clauses
   for variables never seen in the body, rewrite some as debug PRIVATE
   clauses, record on LASTPRIVATE clauses whether the decl is also
   FIRSTPRIVATE, then add implicit data-sharing clauses and pop and
   delete the current omp context.  */
static void
gimplify_adjust_omp_clauses (tree *list_p)
{
  struct gimplify_omp_ctx *ctx = gimplify_omp_ctxp;
  tree c, decl;

  while ((c = *list_p) != NULL)
    {
      splay_tree_node n;
      bool remove = false;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_FIRSTPRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  /* Remove the clause if the variable was never seen.  */
	  remove = !(n->value & GOVD_SEEN);
	  if (! remove)
	    {
	      bool shared = OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED;
	      if ((n->value & GOVD_DEBUG_PRIVATE)
		  || lang_hooks.decls.omp_private_debug_clause (decl, shared))
		{
		  /* Rewrite the clause in place as a debug PRIVATE
		     clause.  */
		  gcc_assert ((n->value & GOVD_DEBUG_PRIVATE) == 0
			      || ((n->value & GOVD_DATA_SHARE_CLASS)
				  == GOVD_PRIVATE));
		  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_PRIVATE);
		  OMP_CLAUSE_PRIVATE_DEBUG (c) = 1;
		}
	    }
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Make sure OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE is set to
	     accurately reflect the presence of a FIRSTPRIVATE clause.  */
	  decl = OMP_CLAUSE_DECL (c);
	  n = splay_tree_lookup (ctx->variables, (splay_tree_key) decl);
	  OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	    = (n->value & GOVD_FIRSTPRIVATE) != 0;
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_DEFAULT:
	  break;

	default:
	  gcc_unreachable ();
	}

      if (remove)
	*list_p = OMP_CLAUSE_CHAIN (c);
      else
	list_p = &OMP_CLAUSE_CHAIN (c);
    }

  /* Add in any implicit data sharing.  */
  splay_tree_foreach (ctx->variables, gimplify_adjust_omp_clauses_1, list_p);

  gimplify_omp_ctxp = ctx->outer_context;
  delete_omp_context (ctx);
}
/* Gimplify the contents of an OMP_PARALLEL statement.  This involves
   gimplification of the body, as well as scanning the body for used
   variables.  We need to do this scan now, because variable-sized
   decls will be decomposed during gimplification.  */
static enum gimplify_status
gimplify_omp_parallel (tree *expr_p, tree *pre_p)
{
  tree expr = *expr_p;
  tree body;

  /* Create the new omp context and record the clause mappings.  */
  gimplify_scan_omp_clauses (&OMP_PARALLEL_CLAUSES (expr), pre_p, true,
			     OMP_PARALLEL_COMBINED (expr));

  push_gimplify_context ();
  gimplify_stmt (&OMP_PARALLEL_BODY (expr));

  /* Pass the gimplified body to pop_gimplify_context when it is a
     BIND_EXPR, otherwise NULL_TREE.  */
  body = OMP_PARALLEL_BODY (expr);
  pop_gimplify_context (TREE_CODE (body) == BIND_EXPR ? body : NULL_TREE);

  gimplify_adjust_omp_clauses (&OMP_PARALLEL_CLAUSES (expr));

  return GS_ALL_DONE;
}
/* Gimplify the gross structure of an OMP_FOR statement: scan the
   clauses, ensure the iteration variable is private (introducing a
   temporary counter when the decl is not a gimple register), gimplify
   the init/cond/incr operands into the pre-body, normalize the
   increment to a GIMPLE_MODIFY_STMT of the form var = var +/- step,
   and gimplify the loop body.  */
static enum gimplify_status
gimplify_omp_for (tree *expr_p, tree *pre_p)
{
  tree for_stmt, decl, var, t;
  enum gimplify_status ret = GS_OK;
  tree body, init_decl = NULL_TREE;

  for_stmt = *expr_p;

  gimplify_scan_omp_clauses (&OMP_FOR_CLAUSES (for_stmt), pre_p, false, false);

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == MODIFY_EXPR
	      || TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  decl = GENERIC_TREE_OPERAND (t, 0);
  gcc_assert (DECL_P (decl));
  gcc_assert (INTEGRAL_TYPE_P (TREE_TYPE (decl)));

  /* Make sure the iteration variable is private.  */
  if (omp_is_private (gimplify_omp_ctxp, decl))
    omp_notice_variable (gimplify_omp_ctxp, decl, true);
  else
    omp_add_variable (gimplify_omp_ctxp, decl, GOVD_PRIVATE | GOVD_SEEN);

  /* If DECL is not a gimple register, create a temporary variable to act as an
     iteration counter.  This is valid, since DECL cannot be modified in the
     body of the loop.  */
  if (!is_gimple_reg (decl))
    {
      var = create_tmp_var (TREE_TYPE (decl), get_name (decl));
      GENERIC_TREE_OPERAND (t, 0) = var;

      /* INIT_DECL copies the counter back into DECL; it is prepended
	 to the loop body below.  */
      init_decl = build_gimple_modify_stmt (decl, var);

      omp_add_variable (gimplify_omp_ctxp, var, GOVD_PRIVATE | GOVD_SEEN);
    }
  else
    var = decl;

  /* If OMP_FOR is re-gimplified, ensure all variables in pre-body
     are noticed.  */
  gimplify_stmt (&OMP_FOR_PRE_BODY (for_stmt));

  ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
			&OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  tree_to_gimple_tuple (&OMP_FOR_INIT (for_stmt));

  t = OMP_FOR_COND (for_stmt);
  gcc_assert (COMPARISON_CLASS_P (t));
  gcc_assert (GENERIC_TREE_OPERAND (t, 0) == decl);
  TREE_OPERAND (t, 0) = var;

  ret |= gimplify_expr (&GENERIC_TREE_OPERAND (t, 1),
			&OMP_FOR_PRE_BODY (for_stmt),
			NULL, is_gimple_val, fb_rvalue);

  tree_to_gimple_tuple (&OMP_FOR_INCR (for_stmt));
  t = OMP_FOR_INCR (for_stmt);
  switch (TREE_CODE (t))
    {
    case PREINCREMENT_EXPR:
    case POSTINCREMENT_EXPR:
      /* Canonicalize ++i / i++ to var = var + 1.  */
      t = build_int_cst (TREE_TYPE (decl), 1);
      t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
      t = build_gimple_modify_stmt (var, t);
      OMP_FOR_INCR (for_stmt) = t;
      break;

    case PREDECREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      /* Canonicalize --i / i-- to var = var + (-1).  */
      t = build_int_cst (TREE_TYPE (decl), -1);
      t = build2 (PLUS_EXPR, TREE_TYPE (decl), var, t);
      t = build_gimple_modify_stmt (var, t);
      OMP_FOR_INCR (for_stmt) = t;
      break;

    case GIMPLE_MODIFY_STMT:
      gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == decl);
      GIMPLE_STMT_OPERAND (t, 0) = var;

      t = GIMPLE_STMT_OPERAND (t, 1);
      switch (TREE_CODE (t))
	{
	case PLUS_EXPR:
	  if (TREE_OPERAND (t, 1) == decl)
	    {
	      /* Rewrite i = step + i as i = i + step.  */
	      TREE_OPERAND (t, 1) = TREE_OPERAND (t, 0);
	      TREE_OPERAND (t, 0) = var;
	      break;
	    }
	  /* Fallthru.  */
	case MINUS_EXPR:
	  gcc_assert (TREE_OPERAND (t, 0) == decl);
	  TREE_OPERAND (t, 0) = var;
	  break;
	default:
	  gcc_unreachable ();
	}

      /* Gimplify the step expression into the pre-body.  */
      ret |= gimplify_expr (&TREE_OPERAND (t, 1), &OMP_FOR_PRE_BODY (for_stmt),
			    NULL, is_gimple_val, fb_rvalue);
      break;

    default:
      gcc_unreachable ();
    }

  body = OMP_FOR_BODY (for_stmt);
  gimplify_to_stmt_list (&body);
  t = alloc_stmt_list ();
  if (init_decl)
    append_to_statement_list (init_decl, &t);
  append_to_statement_list (body, &t);
  OMP_FOR_BODY (for_stmt) = t;

  gimplify_adjust_omp_clauses (&OMP_FOR_CLAUSES (for_stmt));

  /* RET accumulates gimplify statuses with |=; only an accumulated
     value of exactly GS_ALL_DONE is returned as success.  */
  return ret == GS_ALL_DONE ? GS_ALL_DONE : GS_ERROR;
}
/* Gimplify the gross structure of other OpenMP worksharing constructs.
   In particular, OMP_SECTIONS and OMP_SINGLE.  */
static enum gimplify_status
gimplify_omp_workshare (tree *expr_p, tree *pre_p)
{
  tree ws_stmt = *expr_p;

  /* Scan the clauses (not a parallel, not combined), gimplify the body
     into a statement list, then finalize the clause list.  */
  gimplify_scan_omp_clauses (&OMP_CLAUSES (ws_stmt), pre_p, false, false);
  gimplify_to_stmt_list (&OMP_BODY (ws_stmt));
  gimplify_adjust_omp_clauses (&OMP_CLAUSES (ws_stmt));

  return GS_ALL_DONE;
}
/* A subroutine of gimplify_omp_atomic.  The front end is supposed to have
   stabilized the lhs of the atomic operation as *ADDR.  Return true if
   EXPR is this stabilized form.  */
static bool
goa_lhs_expr_p (tree expr, tree addr)
{
  /* Also include casts to other type variants.  The C front end is fond
     of adding these for e.g. volatile variables.  This is like
     STRIP_TYPE_NOPS but includes the main variant lookup.  */
  while ((TREE_CODE (expr) == NOP_EXPR
	  || TREE_CODE (expr) == CONVERT_EXPR
	  || TREE_CODE (expr) == NON_LVALUE_EXPR)
	 && TREE_OPERAND (expr, 0) != error_mark_node
	 && (TYPE_MAIN_VARIANT (TREE_TYPE (expr))
	     == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (expr, 0)))))
    expr = TREE_OPERAND (expr, 0);

  if (TREE_CODE (expr) == INDIRECT_REF)
    {
      /* *p matches ADDR when the pointer operands agree, possibly
	 after stripping parallel layers of matching conversions from
	 both sides.  */
      expr = TREE_OPERAND (expr, 0);
      while (expr != addr
	     && (TREE_CODE (expr) == NOP_EXPR
		 || TREE_CODE (expr) == CONVERT_EXPR
		 || TREE_CODE (expr) == NON_LVALUE_EXPR)
	     && TREE_CODE (expr) == TREE_CODE (addr)
	     && TYPE_MAIN_VARIANT (TREE_TYPE (expr))
		== TYPE_MAIN_VARIANT (TREE_TYPE (addr)))
	{
	  expr = TREE_OPERAND (expr, 0);
	  addr = TREE_OPERAND (addr, 0);
	}
      if (expr == addr)
	return true;
      /* Also accept two distinct ADDR_EXPR nodes over the same
	 operand.  */
      return (TREE_CODE (addr) == ADDR_EXPR
	      && TREE_CODE (expr) == ADDR_EXPR
	      && TREE_OPERAND (addr, 0) == TREE_OPERAND (expr, 0));
    }
  if (TREE_CODE (addr) == ADDR_EXPR && expr == TREE_OPERAND (addr, 0))
    return true;
  return false;
}
/* Walk *EXPR_P and replace
   appearances of *LHS_ADDR with LHS_VAR.  If an expression does not involve
   the lhs, evaluate it into a temporary.  Return 1 if the lhs appeared as
   a subexpression, 0 if it did not, or -1 if an error was encountered.  */
static int
goa_stabilize_expr (tree *expr_p, tree *pre_p, tree lhs_addr, tree lhs_var)
{
  tree expr = *expr_p;
  int saw_lhs;

  if (goa_lhs_expr_p (expr, lhs_addr))
    {
      *expr_p = lhs_var;
      return 1;
    }
  if (is_gimple_val (expr))
    return 0;

  saw_lhs = 0;
  switch (TREE_CODE_CLASS (TREE_CODE (expr)))
    {
    case tcc_binary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 1), pre_p,
				     lhs_addr, lhs_var);
      /* FALLTHRU: a binary expression's operand 0 is processed by the
	 unary case below.  */
    case tcc_unary:
      saw_lhs |= goa_stabilize_expr (&TREE_OPERAND (expr, 0), pre_p,
				     lhs_addr, lhs_var);
      break;
    default:
      break;
    }

  /* No subexpression mentioned the lhs: evaluate the whole expression
     into a temporary ahead of the atomic statement.  */
  if (saw_lhs == 0)
    {
      enum gimplify_status gs;
      gs = gimplify_expr (expr_p, pre_p, NULL, is_gimple_val, fb_rvalue);
      if (gs != GS_ALL_DONE)
	saw_lhs = -1;
    }

  return saw_lhs;
}
/* Gimplify an OMP_ATOMIC statement: rewrite references to the lhs in
   the rhs into a temporary for the loaded value, then lower the
   statement into an OMP_ATOMIC_LOAD appended to *PRE_P followed by an
   OMP_ATOMIC_STORE replacing *EXPR_P.  */
static enum gimplify_status
gimplify_omp_atomic (tree *expr_p, tree *pre_p)
{
  tree addr = TREE_OPERAND (*expr_p, 0);
  tree rhs = TREE_OPERAND (*expr_p, 1);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  tree loaded_val = create_tmp_var (type, NULL);
  enum gimplify_status gs;

  /* Replace occurrences of the lhs in RHS with the temporary that
     will hold the atomically loaded value.  */
  if (goa_stabilize_expr (&rhs, pre_p, addr, loaded_val) < 0)
    return GS_ERROR;

  gs = gimplify_expr (&addr, pre_p, NULL, is_gimple_val, fb_rvalue);
  if (gs != GS_ALL_DONE)
    return GS_ERROR;
  append_to_statement_list (build2 (OMP_ATOMIC_LOAD, void_type_node,
				    loaded_val, addr), pre_p);

  gs = gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue);
  if (gs != GS_ALL_DONE)
    return GS_ERROR;
  *expr_p = build1 (OMP_ATOMIC_STORE, void_type_node, rhs);

  return GS_ALL_DONE;
}
/* Gimplifies the expression tree pointed to by EXPR_P. Return 0 if
gimplification failed.
PRE_P points to the list where side effects that must happen before
EXPR should be stored.
POST_P points to the list where side effects that must happen after
EXPR should be stored, or NULL if there is no suitable list. In
that case, we copy the result to a temporary, emit the
post-effects, and then return the temporary.
GIMPLE_TEST_F points to a function that takes a tree T and
returns nonzero if T is in the GIMPLE form requested by the
caller. The GIMPLE predicates are in tree-gimple.c.
This test is used twice. Before gimplification, the test is
invoked to determine whether *EXPR_P is already gimple enough. If
that fails, *EXPR_P is gimplified according to its code and
GIMPLE_TEST_F is called again. If the test still fails, then a new
temporary variable is created and assigned the value of the
gimplified expression.
FALLBACK tells the function what sort of a temporary we want. If the 1
bit is set, an rvalue is OK. If the 2 bit is set, an lvalue is OK.
If both are set, either is OK, but an lvalue is preferable.
The return value is either GS_ERROR or GS_ALL_DONE, since this function
iterates until solution. */
enum gimplify_status
gimplify_expr (tree *expr_p, tree *pre_p, tree *post_p,
bool (* gimple_test_f) (tree), fallback_t fallback)
{
tree tmp;
tree internal_pre = NULL_TREE;
tree internal_post = NULL_TREE;
tree save_expr;
int is_statement = (pre_p == NULL);
location_t saved_location;
enum gimplify_status ret;
save_expr = *expr_p;
if (save_expr == NULL_TREE)
return GS_ALL_DONE;
/* We used to check the predicate here and return immediately if it
succeeds. This is wrong; the design is for gimplification to be
idempotent, and for the predicates to only test for valid forms, not
whether they are fully simplified. */
/* Set up our internal queues if needed. */
if (pre_p == NULL)
pre_p = &internal_pre;
if (post_p == NULL)
post_p = &internal_post;
saved_location = input_location;
if (save_expr != error_mark_node
&& EXPR_HAS_LOCATION (*expr_p))
input_location = EXPR_LOCATION (*expr_p);
/* Loop over the specific gimplifiers until the toplevel node
remains the same. */
do
{
/* Strip away as many useless type conversions as possible
at the toplevel. */
STRIP_USELESS_TYPE_CONVERSION (*expr_p);
/* Remember the expr. */
save_expr = *expr_p;
/* Die, die, die, my darling. */
if (save_expr == error_mark_node
|| (!GIMPLE_STMT_P (save_expr)
&& TREE_TYPE (save_expr)
&& TREE_TYPE (save_expr) == error_mark_node))
{
ret = GS_ERROR;
break;
}
/* Do any language-specific gimplification. */
ret = lang_hooks.gimplify_expr (expr_p, pre_p, post_p);
if (ret == GS_OK)
{
if (*expr_p == NULL_TREE)
break;
if (*expr_p != save_expr)
continue;
}
else if (ret != GS_UNHANDLED)
break;
ret = GS_OK;
switch (TREE_CODE (*expr_p))
{
/* First deal with the special cases. */
case POSTINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREINCREMENT_EXPR:
case PREDECREMENT_EXPR:
ret = gimplify_self_mod_expr (expr_p, pre_p, post_p,
fallback != fb_none);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
case VIEW_CONVERT_EXPR:
ret = gimplify_compound_lval (expr_p, pre_p, post_p,
fallback ? fallback : fb_rvalue);
break;
case COND_EXPR:
ret = gimplify_cond_expr (expr_p, pre_p, fallback);
/* C99 code may assign to an array in a structure value of a
conditional expression, and this has undefined behavior
only on execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
break;
case CALL_EXPR:
ret = gimplify_call_expr (expr_p, pre_p, fallback != fb_none);
/* C99 code may assign to an array in a structure returned
from a function, and this has undefined behavior only on
execution, so create a temporary if an lvalue is
required. */
if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
break;
case STATIC_CHAIN_EXPR:
/* The argument is used as information only. No need to gimplify */
ret = GS_ALL_DONE;
break;
case TREE_LIST:
gcc_unreachable ();
case COMPOUND_EXPR:
ret = gimplify_compound_expr (expr_p, pre_p, fallback != fb_none);
break;
case MODIFY_EXPR:
case GIMPLE_MODIFY_STMT:
case INIT_EXPR:
ret = gimplify_modify_expr (expr_p, pre_p, post_p,
fallback != fb_none);
if (*expr_p && ret != GS_ERROR)
{
/* The distinction between MODIFY_EXPR and INIT_EXPR is no longer
useful. */
if (TREE_CODE (*expr_p) == INIT_EXPR)
TREE_SET_CODE (*expr_p, MODIFY_EXPR);
/* Convert MODIFY_EXPR to GIMPLE_MODIFY_STMT. */
if (TREE_CODE (*expr_p) == MODIFY_EXPR)
tree_to_gimple_tuple (expr_p);
}
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
ret = gimplify_boolean_expr (expr_p);
break;
case TRUTH_NOT_EXPR:
if (TREE_CODE (TREE_TYPE (*expr_p)) != BOOLEAN_TYPE)
{
tree type = TREE_TYPE (*expr_p);
*expr_p = fold_convert (type, gimple_boolify (*expr_p));
ret = GS_OK;
break;
}
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case ADDR_EXPR:
ret = gimplify_addr_expr (expr_p, pre_p, post_p);
break;
case VA_ARG_EXPR:
ret = gimplify_va_arg_expr (expr_p, pre_p, post_p);
break;
case CONVERT_EXPR:
case NOP_EXPR:
if (IS_EMPTY_STMT (*expr_p))
{
ret = GS_ALL_DONE;
break;
}
if (VOID_TYPE_P (TREE_TYPE (*expr_p))
|| fallback == fb_none)
{
/* Just strip a conversion to void (or in void context) and
try again. */
*expr_p = TREE_OPERAND (*expr_p, 0);
break;
}
ret = gimplify_conversion (expr_p);
if (ret == GS_ERROR)
break;
if (*expr_p != save_expr)
break;
/* FALLTHRU */
case FIX_TRUNC_EXPR:
/* unary_expr: ... | '(' cast ')' val | ... */
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
case INDIRECT_REF:
*expr_p = fold_indirect_ref (*expr_p);
if (*expr_p != save_expr)
break;
/* else fall through. */
case ALIGN_INDIRECT_REF:
case MISALIGNED_INDIRECT_REF:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_reg, fb_rvalue);
recalculate_side_effects (*expr_p);
break;
/* Constants need not be gimplified. */
case INTEGER_CST:
case REAL_CST:
case FIXED_CST:
case STRING_CST:
case COMPLEX_CST:
case VECTOR_CST:
ret = GS_ALL_DONE;
break;
case CONST_DECL:
/* If we require an lvalue, such as for ADDR_EXPR, retain the
CONST_DECL node. Otherwise the decl is replaceable by its
value. */
/* ??? Should be == fb_lvalue, but ADDR_EXPR passes fb_either. */
if (fallback & fb_lvalue)
ret = GS_ALL_DONE;
else
*expr_p = DECL_INITIAL (*expr_p);
break;
case DECL_EXPR:
ret = gimplify_decl_expr (expr_p);
break;
case EXC_PTR_EXPR:
/* FIXME make this a decl. */
ret = GS_ALL_DONE;
break;
case BIND_EXPR:
ret = gimplify_bind_expr (expr_p, pre_p);
break;
case LOOP_EXPR:
ret = gimplify_loop_expr (expr_p, pre_p);
break;
case SWITCH_EXPR:
ret = gimplify_switch_expr (expr_p, pre_p);
break;
case EXIT_EXPR:
ret = gimplify_exit_expr (expr_p);
break;
case GOTO_EXPR:
/* If the target is not LABEL, then it is a computed jump
and the target needs to be gimplified. */
if (TREE_CODE (GOTO_DESTINATION (*expr_p)) != LABEL_DECL)
ret = gimplify_expr (&GOTO_DESTINATION (*expr_p), pre_p,
NULL, is_gimple_val, fb_rvalue);
break;
case LABEL_EXPR:
ret = GS_ALL_DONE;
gcc_assert (decl_function_context (LABEL_EXPR_LABEL (*expr_p))
== current_function_decl);
break;
case CASE_LABEL_EXPR:
ret = gimplify_case_label_expr (expr_p);
break;
case RETURN_EXPR:
ret = gimplify_return_expr (*expr_p, pre_p);
break;
case CONSTRUCTOR:
/* Don't reduce this in place; let gimplify_init_constructor work its
magic. Buf if we're just elaborating this for side effects, just
gimplify any element that has side-effects. */
if (fallback == fb_none)
{
unsigned HOST_WIDE_INT ix;
constructor_elt *ce;
tree temp = NULL_TREE;
for (ix = 0;
VEC_iterate (constructor_elt, CONSTRUCTOR_ELTS (*expr_p),
ix, ce);
ix++)
if (TREE_SIDE_EFFECTS (ce->value))
append_to_statement_list (ce->value, &temp);
*expr_p = temp;
ret = GS_OK;
}
/* C99 code may assign to an array in a constructed
structure or union, and this has undefined behavior only
on execution, so create a temporary if an lvalue is
required. */
else if (fallback == fb_lvalue)
{
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
mark_addressable (*expr_p);
}
else
ret = GS_ALL_DONE;
break;
/* The following are special cases that are not handled by the
original GIMPLE grammar. */
/* SAVE_EXPR nodes are converted into a GIMPLE identifier and
eliminated. */
case SAVE_EXPR:
ret = gimplify_save_expr (expr_p, pre_p, post_p);
break;
case BIT_FIELD_REF:
{
enum gimplify_status r0, r1, r2;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
is_gimple_lvalue, fb_either);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
r2 = gimplify_expr (&TREE_OPERAND (*expr_p, 2), pre_p, post_p,
is_gimple_val, fb_rvalue);
recalculate_side_effects (*expr_p);
ret = MIN (r0, MIN (r1, r2));
}
break;
case NON_LVALUE_EXPR:
/* This should have been stripped above. */
gcc_unreachable ();
case ASM_EXPR:
ret = gimplify_asm_expr (expr_p, pre_p, post_p);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 0));
gimplify_to_stmt_list (&TREE_OPERAND (*expr_p, 1));
ret = GS_ALL_DONE;
break;
case CLEANUP_POINT_EXPR:
ret = gimplify_cleanup_point_expr (expr_p, pre_p);
break;
case TARGET_EXPR:
ret = gimplify_target_expr (expr_p, pre_p, post_p);
break;
case CATCH_EXPR:
gimplify_to_stmt_list (&CATCH_BODY (*expr_p));
ret = GS_ALL_DONE;
break;
case EH_FILTER_EXPR:
gimplify_to_stmt_list (&EH_FILTER_FAILURE (*expr_p));
ret = GS_ALL_DONE;
break;
case CHANGE_DYNAMIC_TYPE_EXPR:
ret = gimplify_expr (&CHANGE_DYNAMIC_TYPE_LOCATION (*expr_p),
pre_p, post_p, is_gimple_reg, fb_lvalue);
break;
case OBJ_TYPE_REF:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&OBJ_TYPE_REF_OBJECT (*expr_p), pre_p, post_p,
is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&OBJ_TYPE_REF_EXPR (*expr_p), pre_p, post_p,
is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
}
break;
case LABEL_DECL:
/* We get here when taking the address of a label. We mark
the label as "forced"; meaning it can never be removed and
it is a potential target for any computed goto. */
FORCED_LABEL (*expr_p) = 1;
ret = GS_ALL_DONE;
break;
case STATEMENT_LIST:
ret = gimplify_statement_list (expr_p, pre_p);
break;
case WITH_SIZE_EXPR:
{
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p == &internal_post ? NULL : post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
is_gimple_val, fb_rvalue);
}
break;
case VAR_DECL:
case PARM_DECL:
ret = gimplify_var_or_parm_decl (expr_p);
break;
case RESULT_DECL:
/* When within an OpenMP context, notice uses of variables. */
if (gimplify_omp_ctxp)
omp_notice_variable (gimplify_omp_ctxp, *expr_p, true);
ret = GS_ALL_DONE;
break;
case SSA_NAME:
/* Allow callbacks into the gimplifier during optimization. */
ret = GS_ALL_DONE;
break;
case OMP_PARALLEL:
ret = gimplify_omp_parallel (expr_p, pre_p);
break;
case OMP_FOR:
ret = gimplify_omp_for (expr_p, pre_p);
break;
case OMP_SECTIONS:
case OMP_SINGLE:
ret = gimplify_omp_workshare (expr_p, pre_p);
break;
case OMP_SECTION:
case OMP_MASTER:
case OMP_ORDERED:
case OMP_CRITICAL:
gimplify_to_stmt_list (&OMP_BODY (*expr_p));
break;
case OMP_ATOMIC:
ret = gimplify_omp_atomic (expr_p, pre_p);
break;
case OMP_RETURN:
case OMP_CONTINUE:
case OMP_ATOMIC_STORE:
ret = GS_ALL_DONE;
break;
case OMP_ATOMIC_LOAD:
if (gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, NULL,
is_gimple_val, fb_rvalue) != GS_ALL_DONE)
ret = GS_ERROR;
else
ret = GS_ALL_DONE;
break;
case POINTER_PLUS_EXPR:
/* Convert ((type *)A)+offset into &A->field_of_type_and_offset.
The second is gimple immediate saving a need for extra statement.
*/
if (TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST
&& (tmp = maybe_fold_offset_to_reference
(TREE_OPERAND (*expr_p, 0), TREE_OPERAND (*expr_p, 1),
TREE_TYPE (TREE_TYPE (*expr_p)))))
{
tree ptr_type = build_pointer_type (TREE_TYPE (tmp));
if (useless_type_conversion_p (TREE_TYPE (*expr_p), ptr_type))
{
*expr_p = build_fold_addr_expr_with_type (tmp, ptr_type);
break;
}
}
/* Convert (void *)&a + 4 into (void *)&a[1]. */
if (TREE_CODE (TREE_OPERAND (*expr_p, 0)) == NOP_EXPR
&& TREE_CODE (TREE_OPERAND (*expr_p, 1)) == INTEGER_CST
&& POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (*expr_p,
0),0)))
&& (tmp = maybe_fold_offset_to_reference
(TREE_OPERAND (TREE_OPERAND (*expr_p, 0), 0),
TREE_OPERAND (*expr_p, 1),
TREE_TYPE (TREE_TYPE
(TREE_OPERAND (TREE_OPERAND (*expr_p, 0),
0))))))
{
tmp = build_fold_addr_expr (tmp);
*expr_p = fold_convert (TREE_TYPE (*expr_p), tmp);
break;
}
/* FALLTHRU */
default:
switch (TREE_CODE_CLASS (TREE_CODE (*expr_p)))
{
case tcc_comparison:
/* Handle comparison of objects of non scalar mode aggregates
with a call to memcmp. It would be nice to only have to do
this for variable-sized objects, but then we'd have to allow
the same nest of reference nodes we allow for MODIFY_EXPR and
that's too complex.
Compare scalar mode aggregates as scalar mode values. Using
memcmp for them would be very inefficient at best, and is
plain wrong if bitfields are involved. */
{
tree type = TREE_TYPE (TREE_OPERAND (*expr_p, 1));
if (!AGGREGATE_TYPE_P (type))
goto expr_2;
else if (TYPE_MODE (type) != BLKmode)
ret = gimplify_scalar_mode_aggregate_compare (expr_p);
else
ret = gimplify_variable_sized_compare (expr_p);
break;
}
/* If *EXPR_P does not need to be special-cased, handle it
according to its class. */
case tcc_unary:
ret = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
break;
case tcc_binary:
expr_2:
{
enum gimplify_status r0, r1;
r0 = gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p,
post_p, is_gimple_val, fb_rvalue);
r1 = gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p,
post_p, is_gimple_val, fb_rvalue);
ret = MIN (r0, r1);
break;
}
case tcc_declaration:
case tcc_constant:
ret = GS_ALL_DONE;
goto dont_recalculate;
default:
gcc_assert (TREE_CODE (*expr_p) == TRUTH_AND_EXPR
|| TREE_CODE (*expr_p) == TRUTH_OR_EXPR
|| TREE_CODE (*expr_p) == TRUTH_XOR_EXPR);
goto expr_2;
}
recalculate_side_effects (*expr_p);
dont_recalculate:
break;
}
/* If we replaced *expr_p, gimplify again. */
if (ret == GS_OK && (*expr_p == NULL || *expr_p == save_expr))
ret = GS_ALL_DONE;
}
while (ret == GS_OK);
/* If we encountered an error_mark somewhere nested inside, either
stub out the statement or propagate the error back out. */
if (ret == GS_ERROR)
{
if (is_statement)
*expr_p = NULL;
goto out;
}
/* This was only valid as a return value from the langhook, which
we handled. Make sure it doesn't escape from any other context. */
gcc_assert (ret != GS_UNHANDLED);
if (fallback == fb_none && *expr_p && !is_gimple_stmt (*expr_p))
{
/* We aren't looking for a value, and we don't have a valid
statement. If it doesn't have side-effects, throw it away. */
if (!TREE_SIDE_EFFECTS (*expr_p))
*expr_p = NULL;
else if (!TREE_THIS_VOLATILE (*expr_p))
{
/* This is probably a _REF that contains something nested that
has side effects. Recurse through the operands to find it. */
enum tree_code code = TREE_CODE (*expr_p);
switch (code)
{
case COMPONENT_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
case VIEW_CONVERT_EXPR:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
break;
case ARRAY_REF:
case ARRAY_RANGE_REF:
gimplify_expr (&TREE_OPERAND (*expr_p, 0), pre_p, post_p,
gimple_test_f, fallback);
gimplify_expr (&TREE_OPERAND (*expr_p, 1), pre_p, post_p,
gimple_test_f, fallback);
break;
default:
/* Anything else with side-effects must be converted to
a valid statement before we get here. */
gcc_unreachable ();
}
*expr_p = NULL;
}
else if (COMPLETE_TYPE_P (TREE_TYPE (*expr_p))
&& TYPE_MODE (TREE_TYPE (*expr_p)) != BLKmode)
{
/* Historically, the compiler has treated a bare reference
to a non-BLKmode volatile lvalue as forcing a load. */
tree type = TYPE_MAIN_VARIANT (TREE_TYPE (*expr_p));
/* Normally, we do not want to create a temporary for a
TREE_ADDRESSABLE type because such a type should not be
copied by bitwise-assignment. However, we make an
exception here, as all we are doing here is ensuring that
we read the bytes that make up the type. We use
create_tmp_var_raw because create_tmp_var will abort when
given a TREE_ADDRESSABLE type. */
tree tmp = create_tmp_var_raw (type, "vol");
gimple_add_tmp_var (tmp);
*expr_p = build_gimple_modify_stmt (tmp, *expr_p);
}
else
/* We can't do anything useful with a volatile reference to
an incomplete type, so just throw it away. Likewise for
a BLKmode type, since any implicit inner load should
already have been turned into an explicit one by the
gimplification process. */
*expr_p = NULL;
}
/* If we are gimplifying at the statement level, we're done. Tack
everything together and replace the original statement with the
gimplified form. */
if (fallback == fb_none || is_statement)
{
if (internal_pre || internal_post)
{
append_to_statement_list (*expr_p, &internal_pre);
append_to_statement_list (internal_post, &internal_pre);
annotate_all_with_locus (&internal_pre, input_location);
*expr_p = internal_pre;
}
else if (!*expr_p)
;
else if (TREE_CODE (*expr_p) == STATEMENT_LIST)
annotate_all_with_locus (expr_p, input_location);
else
annotate_one_with_locus (*expr_p, input_location);
goto out;
}
/* Otherwise we're gimplifying a subexpression, so the resulting value is
interesting. */
/* If it's sufficiently simple already, we're done. Unless we are
handling some post-effects internally; if that's the case, we need to
copy into a temp before adding the post-effects to the tree. */
if (!internal_post && (*gimple_test_f) (*expr_p))
goto out;
/* Otherwise, we need to create a new temporary for the gimplified
expression. */
/* We can't return an lvalue if we have an internal postqueue. The
object the lvalue refers to would (probably) be modified by the
postqueue; we need to copy the value out first, which means an
rvalue. */
if ((fallback & fb_lvalue) && !internal_post
&& is_gimple_addressable (*expr_p))
{
/* An lvalue will do. Take the address of the expression, store it
in a temporary, and replace the expression with an INDIRECT_REF of
that temporary. */
tmp = build_fold_addr_expr (*expr_p);
gimplify_expr (&tmp, pre_p, post_p, is_gimple_reg, fb_rvalue);
*expr_p = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (tmp)), tmp);
}
else if ((fallback & fb_rvalue) && is_gimple_formal_tmp_rhs (*expr_p))
{
gcc_assert (!VOID_TYPE_P (TREE_TYPE (*expr_p)));
/* An rvalue will do. Assign the gimplified expression into a new
temporary TMP and replace the original expression with TMP. */
if (internal_post || (fallback & fb_lvalue))
/* The postqueue might change the value of the expression between
the initialization and use of the temporary, so we can't use a
formal temp. FIXME do we care? */
*expr_p = get_initialized_tmp_var (*expr_p, pre_p, post_p);
else
*expr_p = get_formal_tmp_var (*expr_p, pre_p);
if (TREE_CODE (*expr_p) != SSA_NAME)
DECL_GIMPLE_FORMAL_TEMP_P (*expr_p) = 1;
}
else
{
#ifdef ENABLE_CHECKING
if (!(fallback & fb_mayfail))
{
fprintf (stderr, "gimplification failed:\n");
print_generic_expr (stderr, *expr_p, 0);
debug_tree (*expr_p);
internal_error ("gimplification failed");
}
#endif
gcc_assert (fallback & fb_mayfail);
/* If this is an asm statement, and the user asked for the
impossible, don't die. Fail and let gimplify_asm_expr
issue an error. */
ret = GS_ERROR;
goto out;
}
/* Make sure the temporary matches our predicate. */
gcc_assert ((*gimple_test_f) (*expr_p));
if (internal_post)
{
annotate_all_with_locus (&internal_post, input_location);
append_to_statement_list (internal_post, pre_p);
}
out:
input_location = saved_location;
return ret;
}
/* Look through TYPE for variable-sized objects and gimplify each such
   size that we find.  Add to LIST_P any statements generated.  */
void
gimplify_type_sizes (tree type, tree *list_p)
{
  tree field, t;

  /* Nothing to do for a missing or erroneous type.  */
  if (type == NULL || type == error_mark_node)
    return;

  /* We first do the main variant, then copy into any other variants.  */
  type = TYPE_MAIN_VARIANT (type);

  /* Avoid infinite recursion.  */
  if (TYPE_SIZES_GIMPLIFIED (type))
    return;

  TYPE_SIZES_GIMPLIFIED (type) = 1;

  switch (TREE_CODE (type))
    {
    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
    case BOOLEAN_TYPE:
    case REAL_TYPE:
    case FIXED_POINT_TYPE:
      /* Scalar types: the bounds themselves may be variable-sized
         expressions.  Gimplify them, then propagate the (possibly
         replaced) bound trees to every variant of this type.  */
      gimplify_one_sizepos (&TYPE_MIN_VALUE (type), list_p);
      gimplify_one_sizepos (&TYPE_MAX_VALUE (type), list_p);

      for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
        {
          TYPE_MIN_VALUE (t) = TYPE_MIN_VALUE (type);
          TYPE_MAX_VALUE (t) = TYPE_MAX_VALUE (type);
        }
      break;

    case ARRAY_TYPE:
      /* These types may not have declarations, so handle them here.  */
      gimplify_type_sizes (TREE_TYPE (type), list_p);
      gimplify_type_sizes (TYPE_DOMAIN (type), list_p);
      break;

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      /* Gimplify each field's offset expression and recurse into the
         field's own type.  */
      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
        if (TREE_CODE (field) == FIELD_DECL)
          {
            gimplify_one_sizepos (&DECL_FIELD_OFFSET (field), list_p);
            gimplify_type_sizes (TREE_TYPE (field), list_p);
          }
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      /* We used to recurse on the pointed-to type here, which turned out to
         be incorrect because its definition might refer to variables not
         yet initialized at this point if a forward declaration is involved.
         It was actually useful for anonymous pointed-to types to ensure
         that the sizes evaluation dominates every possible later use of the
         values.  Restricting to such types here would be safe since there
         is no possible forward declaration around, but would introduce an
         undesirable middle-end semantic to anonymity.  We then defer to
         front-ends the responsibility of ensuring that the sizes are
         evaluated both early and late enough, e.g. by attaching artificial
         type declarations to the tree.  */
      break;

    default:
      break;
    }

  /* Gimplify the size expressions of the type itself, then copy them (and
     the gimplified flag) into every other variant.  */
  gimplify_one_sizepos (&TYPE_SIZE (type), list_p);
  gimplify_one_sizepos (&TYPE_SIZE_UNIT (type), list_p);

  for (t = TYPE_NEXT_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
    {
      TYPE_SIZE (t) = TYPE_SIZE (type);
      TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (type);
      TYPE_SIZES_GIMPLIFIED (t) = 1;
    }
}
/* A subroutine of gimplify_type_sizes to make sure that *EXPR_P,
   a size or position, has had all of its SAVE_EXPRs evaluated.
   We add any required statements to STMT_P.  */
void
gimplify_one_sizepos (tree *expr_p, tree *stmt_p)
{
  tree type, expr = *expr_p;

  /* We don't do anything if the value isn't there, is constant, or contains
     A PLACEHOLDER_EXPR.  We also don't want to do anything if it's already
     a VAR_DECL.  If it's a VAR_DECL from another function, the gimplifier
     will want to replace it with a new variable, but that will cause problems
     if this type is from outside the function.  It's OK to have that here.  */
  if (expr == NULL_TREE || TREE_CONSTANT (expr)
      || TREE_CODE (expr) == VAR_DECL
      || CONTAINS_PLACEHOLDER_P (expr))
    return;

  type = TREE_TYPE (expr);

  /* Gimplify a private, unshared copy so other users of the shared
     size expression are not disturbed.  */
  *expr_p = unshare_expr (expr);
  gimplify_expr (expr_p, stmt_p, NULL, is_gimple_val, fb_rvalue);
  expr = *expr_p;

  /* Verify that we've an exact type match with the original expression.
     In particular, we do not wish to drop a "sizetype" in favour of a
     type of similar dimensions.  We don't want to pollute the generic
     type-stripping code with this knowledge because it doesn't matter
     for the bulk of GENERIC/GIMPLE.  It only matters that TYPE_SIZE_UNIT
     and friends retain their "sizetype-ness".  */
  if (TREE_TYPE (expr) != type
      && TREE_CODE (type) == INTEGER_TYPE
      && TYPE_IS_SIZETYPE (type))
    {
      tree tmp;

      /* Assign the gimplified value into a fresh temporary of the original
         sizetype via a NOP_EXPR cast, preserving the expression's source
         location on the assignment when it has one.  */
      *expr_p = create_tmp_var (type, NULL);
      tmp = build1 (NOP_EXPR, type, expr);
      tmp = build_gimple_modify_stmt (*expr_p, tmp);
      if (EXPR_HAS_LOCATION (expr))
        SET_EXPR_LOCUS (tmp, EXPR_LOCUS (expr));
      else
        SET_EXPR_LOCATION (tmp, input_location);

      gimplify_and_add (tmp, stmt_p);
    }
}
/* Gimplify the body of statements pointed to by BODY_P.  FNDECL is the
   function decl containing BODY.  If DO_PARMS is true, callee-copied
   parameters are also gimplified and their setup statements are prepended
   to the body.  */
void
gimplify_body (tree *body_p, tree fndecl, bool do_parms)
{
  location_t saved_location = input_location;
  tree body, parm_stmts;

  timevar_push (TV_TREE_GIMPLIFY);

  /* No gimplification context may be active; push a fresh one.  */
  gcc_assert (gimplify_ctxp == NULL);
  push_gimplify_context ();

  /* Unshare most shared trees in the body and in that of any nested functions.
     It would seem we don't have to do this for nested functions because
     they are supposed to be output and then the outer function gimplified
     first, but the g++ front end doesn't always do it that way.  */
  unshare_body (body_p, fndecl);
  unvisit_body (body_p, fndecl);

  /* Make sure input_location isn't set to something weird.  */
  input_location = DECL_SOURCE_LOCATION (fndecl);

  /* Resolve callee-copies.  This has to be done before processing
     the body so that DECL_VALUE_EXPR gets processed correctly.  */
  parm_stmts = do_parms ? gimplify_parameters () : NULL;

  /* Gimplify the function's body.  */
  gimplify_stmt (body_p);
  body = *body_p;

  /* Normalize: an empty body becomes an empty statement list, and a
     single-statement list collapses to that statement.  */
  if (!body)
    body = alloc_stmt_list ();
  else if (TREE_CODE (body) == STATEMENT_LIST)
    {
      tree t = expr_only (*body_p);
      if (t)
        body = t;
    }

  /* If there isn't an outer BIND_EXPR, add one.  */
  if (TREE_CODE (body) != BIND_EXPR)
    {
      tree b = build3 (BIND_EXPR, void_type_node, NULL_TREE,
                       NULL_TREE, NULL_TREE);
      TREE_SIDE_EFFECTS (b) = 1;
      append_to_statement_list_force (body, &BIND_EXPR_BODY (b));
      body = b;
    }

  /* If we had callee-copies statements, insert them at the beginning
     of the function.  */
  if (parm_stmts)
    {
      append_to_statement_list_force (BIND_EXPR_BODY (body), &parm_stmts);
      BIND_EXPR_BODY (body) = parm_stmts;
    }

  /* Unshare again, in case gimplification was sloppy.  */
  unshare_all_trees (body);

  *body_p = body;
  pop_gimplify_context (body);
  gcc_assert (gimplify_ctxp == NULL);

#ifdef ENABLE_TYPES_CHECKING
  if (!errorcount && !sorrycount)
    verify_gimple_1 (BIND_EXPR_BODY (*body_p));
#endif

  timevar_pop (TV_TREE_GIMPLIFY);
  input_location = saved_location;
}
/* Entry point to the gimplification pass.  FNDECL is the FUNCTION_DECL
   node for the function we want to gimplify.  */
void
gimplify_function_tree (tree fndecl)
{
  tree oldfn, parm, ret;

  /* Enter FNDECL's context: make it the current function and push its
     struct function, creating one if it does not exist yet.  */
  oldfn = current_function_decl;
  current_function_decl = fndecl;
  if (DECL_STRUCT_FUNCTION (fndecl))
    push_cfun (DECL_STRUCT_FUNCTION (fndecl));
  else
    push_struct_function (fndecl);

  for (parm = DECL_ARGUMENTS (fndecl); parm ; parm = TREE_CHAIN (parm))
    {
      /* Preliminarily mark non-addressed complex variables as eligible
         for promotion to gimple registers.  We'll transform their uses
         as we find them.  */
      if ((TREE_CODE (TREE_TYPE (parm)) == COMPLEX_TYPE
           || TREE_CODE (TREE_TYPE (parm)) == VECTOR_TYPE)
          && !TREE_THIS_VOLATILE (parm)
          && !needs_to_live_in_memory (parm))
        DECL_GIMPLE_REG_P (parm) = 1;
    }

  /* Likewise for the return value.  */
  ret = DECL_RESULT (fndecl);
  if ((TREE_CODE (TREE_TYPE (ret)) == COMPLEX_TYPE
       || TREE_CODE (TREE_TYPE (ret)) == VECTOR_TYPE)
      && !needs_to_live_in_memory (ret))
    DECL_GIMPLE_REG_P (ret) = 1;

  gimplify_body (&DECL_SAVED_TREE (fndecl), fndecl, true);

  /* If we're instrumenting function entry/exit, then prepend the call to
     the entry hook and wrap the whole function in a TRY_FINALLY_EXPR to
     catch the exit hook.  */
  /* ??? Add some way to ignore exceptions for this TFE.  */
  if (flag_instrument_function_entry_exit
      && !DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (fndecl)
      && !flag_instrument_functions_exclude_p (fndecl))
    {
      tree tf, x, bind;

      /* TRY_FINALLY_EXPR: operand 0 holds the original body, operand 1
         holds the call to the profiling exit hook.  */
      tf = build2 (TRY_FINALLY_EXPR, void_type_node, NULL, NULL);
      TREE_SIDE_EFFECTS (tf) = 1;
      x = DECL_SAVED_TREE (fndecl);
      append_to_statement_list (x, &TREE_OPERAND (tf, 0));
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_EXIT];
      x = build_call_expr (x, 0);
      append_to_statement_list (x, &TREE_OPERAND (tf, 1));

      /* New outermost BIND_EXPR: the entry-hook call, then the
         TRY_FINALLY_EXPR wrapping the body.  */
      bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
      TREE_SIDE_EFFECTS (bind) = 1;
      x = implicit_built_in_decls[BUILT_IN_PROFILE_FUNC_ENTER];
      x = build_call_expr (x, 0);
      append_to_statement_list (x, &BIND_EXPR_BODY (bind));
      append_to_statement_list (tf, &BIND_EXPR_BODY (bind));
      DECL_SAVED_TREE (fndecl) = bind;
    }

  /* Restore the previous function context.  */
  cfun->gimplified = true;
  current_function_decl = oldfn;
  pop_cfun ();
}
/* Expands EXPR to list of gimple statements STMTS.  If SIMPLE is true,
   force the result to be either ssa_name or an invariant, otherwise
   just force it to be a rhs expression.  If VAR is not NULL, make the
   base variable of the final destination be VAR if suitable.  */
tree
force_gimple_operand (tree expr, tree *stmts, bool simple, tree var)
{
  tree t;
  enum gimplify_status ret;
  gimple_predicate gimple_test_f;

  *stmts = NULL_TREE;

  /* Already a gimple value: nothing to do, no statements emitted.  */
  if (is_gimple_val (expr))
    return expr;

  /* Predicate the gimplified result must satisfy, per SIMPLE.  */
  gimple_test_f = simple ? is_gimple_val : is_gimple_reg_rhs;

  push_gimplify_context ();
  gimplify_ctxp->into_ssa = gimple_in_ssa_p (cfun);
  gimplify_ctxp->allow_rhs_cond_expr = true;

  if (var)
    expr = build_gimple_modify_stmt (var, expr);

  if (TREE_CODE (expr) != GIMPLE_MODIFY_STMT
      && TREE_TYPE (expr) == void_type_node)
    {
      /* The expression is a statement: gimplify it entirely into STMTS
         and return no value.  */
      gimplify_and_add (expr, stmts);
      expr = NULL_TREE;
    }
  else
    {
      ret = gimplify_expr (&expr, stmts, NULL,
                           gimple_test_f, fb_rvalue);
      gcc_assert (ret != GS_ERROR);
    }

  /* Register any temporaries the gimplifier created with the SSA
     machinery, when referenced-vars information exists.  */
  if (gimple_referenced_vars (cfun))
    {
      for (t = gimplify_ctxp->temps; t ; t = TREE_CHAIN (t))
        add_referenced_var (t);
    }

  pop_gimplify_context (NULL);

  return expr;
}
/* Invokes force_gimple_operand for EXPR with parameters SIMPLE_P and VAR.
   If some statements are produced, emits them at BSI.  If BEFORE is true,
   the statements are inserted before BSI, otherwise they are inserted after
   it.  M specifies the way BSI moves after insertion (BSI_SAME_STMT or
   BSI_CONTINUE_LINKING are the usual values).  */
tree
force_gimple_operand_bsi (block_stmt_iterator *bsi, tree expr,
                          bool simple_p, tree var, bool before,
                          enum bsi_iterator_update m)
{
  tree stmts;

  expr = force_gimple_operand (expr, &stmts, simple_p, var);

  if (stmts)
    {
      /* In SSA form, symbols in the newly produced statements must be
         marked for renaming before the statements are inserted.  */
      if (gimple_in_ssa_p (cfun))
        {
          tree_stmt_iterator tsi;
          for (tsi = tsi_start (stmts); !tsi_end_p (tsi); tsi_next (&tsi))
            mark_symbols_for_renaming (tsi_stmt (tsi));
        }

      if (before)
        bsi_insert_before (bsi, stmts, m);
      else
        bsi_insert_after (bsi, stmts, m);
    }

  return expr;
}
#include "gt-gimplify.h"
|
6676.c | // this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose
void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
int t10;
int t8;
int t6;
int t4;
int t2;
for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
for (t4 = 0; t4 <= ny - 1; t4 += 1)
ey[0][t4] = _fict_[t2];
#pragma omp parallel for
for (t4 = 1; t4 <= nx - 1; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
for (t8 = 0; t8 <= ny - 1; t8 += 16)
for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1)
ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
#pragma omp parallel for
for (t4 = 0; t4 <= nx - 1; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 1 ? t4 + 15 : nx - 1); t6 += 1)
for (t8 = 1; t8 <= ny - 1; t8 += 16)
for (t10 = t8; t10 <= (ny - 1 < t8 + 15 ? ny - 1 : t8 + 15); t10 += 1)
ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
#pragma omp parallel for
for (t4 = 0; t4 <= nx - 2; t4 += 16)
for (t6 = t4; t6 <= (t4 + 15 < nx - 2 ? t4 + 15 : nx - 2); t6 += 1)
for (t8 = 0; t8 <= ny - 2; t8 += 16)
for (t10 = t8; t10 <= (ny - 2 < t8 + 15 ? ny - 2 : t8 + 15); t10 += 1)
hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
}
}
|
GB_unaryop__identity_uint16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_fp32
// op(A') function: GB_tran__identity_uint16_fp32
// C type: uint16_t
// A type: float
// cast: uint16_t cij ; GB_CAST_UNSIGNED(cij,aij,16)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z ; GB_CAST_UNSIGNED(z,aij,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity operator entry-by-entry: each float entry Ax [p] is
// cast to uint16_t via GB_CAST_UNSIGNED (see GB_CASTING above) and stored
// in Cx [p].  The cast-and-assign is expanded from this file's GB_CAST_OP
// macro.  Returns GrB_NO_VALUE when the operator is disabled (GB_DISABLE).
GrB_Info GB_unop__identity_uint16_fp32
(
    uint16_t *Cx,       // Cx and Ax may be aliased
    float *Ax,          // input array, anz entries
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // entries are independent, so the loop is embarrassingly parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// All of the work is done by the shared template GB_unaryop_transpose.c,
// instantiated here (phase 2 of 2) with this file's GB_* macros so the
// float entries of A are cast to uint16_t while being transposed into C.
// Returns GrB_NO_VALUE when the operator is disabled (GB_DISABLE).
GrB_Info GB_tran__identity_uint16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // workspace used by the template
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
crop_and_resize.c | #include <TH/TH.h>
#include <stdio.h>
#include <math.h>
void CropAndResizePerBox(
const float * image_data,
const int batch_size,
const int depth,
const int image_height,
const int image_width,
const float * boxes_data,
const int * box_index_data,
const int start_box,
const int limit_box,
float * corps_data,
const int crop_height,
const int crop_width,
const float extrapolation_value
) {
const int image_channel_elements = image_height * image_width;
const int image_elements = depth * image_channel_elements;
const int channel_elements = crop_height * crop_width;
const int crop_elements = depth * channel_elements;
int b;
#pragma omp parallel for
for (b = start_box; b < limit_box; ++b) {
const float * box = boxes_data + b * 4;
const float y1 = box[0];
const float x1 = box[1];
const float y2 = box[2];
const float x2 = box[3];
const int b_in = box_index_data[b];
if (b_in < 0 || b_in >= batch_size) {
printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
exit(-1);
}
const float height_scale =
(crop_height > 1)
? (y2 - y1) * (image_height - 1) / (crop_height - 1)
: 0;
const float width_scale =
(crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
: 0;
for (int y = 0; y < crop_height; ++y)
{
const float in_y = (crop_height > 1)
? y1 * (image_height - 1) + y * height_scale
: 0.5 * (y1 + y2) * (image_height - 1);
if (in_y < 0 || in_y > image_height - 1)
{
for (int x = 0; x < crop_width; ++x)
{
for (int d = 0; d < depth; ++d)
{
// crops(b, y, x, d) = extrapolation_value;
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
}
}
continue;
}
const int top_y_index = floorf(in_y);
const int bottom_y_index = ceilf(in_y);
const float y_lerp = in_y - top_y_index;
for (int x = 0; x < crop_width; ++x)
{
const float in_x = (crop_width > 1)
? x1 * (image_width - 1) + x * width_scale
: 0.5 * (x1 + x2) * (image_width - 1);
if (in_x < 0 || in_x > image_width - 1)
{
for (int d = 0; d < depth; ++d)
{
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = extrapolation_value;
}
continue;
}
const int left_x_index = floorf(in_x);
const int right_x_index = ceilf(in_x);
const float x_lerp = in_x - left_x_index;
for (int d = 0; d < depth; ++d)
{
const float *pimage = image_data + b_in * image_elements + d * image_channel_elements;
const float top_left = pimage[top_y_index * image_width + left_x_index];
const float top_right = pimage[top_y_index * image_width + right_x_index];
const float bottom_left = pimage[bottom_y_index * image_width + left_x_index];
const float bottom_right = pimage[bottom_y_index * image_width + right_x_index];
const float top = top_left + (top_right - top_left) * x_lerp;
const float bottom =
bottom_left + (bottom_right - bottom_left) * x_lerp;
corps_data[crop_elements * b + channel_elements * d + y * crop_width + x] = top + (bottom - top) * y_lerp;
}
} // end for x
} // end for y
} // end for b
}
// Forward pass: allocate the output tensor as
// [num_boxes, depth, crop_height, crop_width], zero it, and delegate the
// per-box bilinear crop-and-resize to CropAndResizePerBox over all boxes.
void crop_and_resize_forward(
    THFloatTensor * image,
    THFloatTensor * boxes,      // [y1, x1, y2, x2]
    THIntTensor * box_index,    // range in [0, batch_size)
    const float extrapolation_value,
    const int crop_height,
    const int crop_width,
    THFloatTensor * crops
) {
    // Input geometry comes straight from the image and box tensors.
    const int n_batch    = THFloatTensor_size(image, 0);
    const int n_channels = THFloatTensor_size(image, 1);
    const int in_height  = THFloatTensor_size(image, 2);
    const int in_width   = THFloatTensor_size(image, 3);
    const int n_boxes    = THFloatTensor_size(boxes, 0);

    // Prepare a zeroed output buffer of the requested crop size.
    THFloatTensor_resize4d(crops, n_boxes, n_channels, crop_height, crop_width);
    THFloatTensor_zero(crops);

    // Process every box, [0, n_boxes).
    CropAndResizePerBox(
        THFloatTensor_data(image),
        n_batch,
        n_channels,
        in_height,
        in_width,

        THFloatTensor_data(boxes),
        THIntTensor_data(box_index),
        0,
        n_boxes,

        THFloatTensor_data(crops),
        crop_height,
        crop_width,
        extrapolation_value
    );
}
// Backward pass of crop_and_resize: scatter the incoming gradients in
// GRADS (shape [num_boxes, depth, crop_height, crop_width]) back into
// GRADS_IMAGE (shape [batch, depth, image_height, image_width]),
// distributing each gradient over the four source pixels with the same
// bilinear weights the forward pass used.  Samples that fell outside the
// image contributed extrapolation_value and carry no image gradient.
// NOTE(review): unlike the forward path this loop is serial; parallelizing
// over boxes would race on grads_image since boxes may overlap.
void crop_and_resize_backward(
    THFloatTensor * grads,
    THFloatTensor * boxes, // [y1, x1, y2, x2]
    THIntTensor * box_index, // range in [0, batch_size)
    THFloatTensor * grads_image // resize to [bsize, c, hc, wc]
)
{
    // shape
    //const int batch_size = grads_image->size[0];
    //const int depth = grads_image->size[1];
    //const int image_height = grads_image->size[2];
    //const int image_width = grads_image->size[3];
    //const int num_boxes = grads->size[0];
    //const int crop_height = grads->size[2];
    //const int crop_width = grads->size[3];
    const int batch_size = THFloatTensor_size(grads_image, 0);
    const int depth = THFloatTensor_size(grads_image, 1);
    const int image_height = THFloatTensor_size(grads_image, 2);
    const int image_width = THFloatTensor_size(grads_image, 3);
    const int num_boxes = THFloatTensor_size(grads, 0);
    const int crop_height = THFloatTensor_size(grads, 2);
    const int crop_width = THFloatTensor_size(grads, 3);

    // n_elements: strides for one channel plane and one full image/crop
    const int image_channel_elements = image_height * image_width;
    const int image_elements = depth * image_channel_elements;
    const int channel_elements = crop_height * crop_width;
    const int crop_elements = depth * channel_elements;

    // init output space: gradients accumulate with +=, so start from zero
    THFloatTensor_zero(grads_image);

    // data pointer
    const float * grads_data = THFloatTensor_data(grads);
    const float * boxes_data = THFloatTensor_data(boxes);
    const int * box_index_data = THIntTensor_data(box_index);
    float * grads_image_data = THFloatTensor_data(grads_image);

    for (int b = 0; b < num_boxes; ++b) {
        const float * box = boxes_data + b * 4;
        const float y1 = box[0];
        const float x1 = box[1];
        const float y2 = box[2];
        const float x2 = box[3];

        const int b_in = box_index_data[b];
        if (b_in < 0 || b_in >= batch_size) {
            printf("Error: batch_index %d out of range [0, %d)\n", b_in, batch_size);
            exit(-1);
        }

        // Same crop-to-image coordinate mapping as the forward pass; a
        // 1-pixel crop samples the box center (handled per-axis below).
        const float height_scale =
            (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1)
                              : 0;
        const float width_scale =
            (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1)
                             : 0;

        for (int y = 0; y < crop_height; ++y)
        {
            const float in_y = (crop_height > 1)
                ? y1 * (image_height - 1) + y * height_scale
                : 0.5 * (y1 + y2) * (image_height - 1);
            if (in_y < 0 || in_y > image_height - 1)
            {
                // Out-of-image sample: forward produced the constant
                // extrapolation_value, so there is no gradient to scatter.
                continue;
            }

            const int top_y_index = floorf(in_y);
            const int bottom_y_index = ceilf(in_y);
            const float y_lerp = in_y - top_y_index;

            for (int x = 0; x < crop_width; ++x)
            {
                const float in_x = (crop_width > 1)
                    ? x1 * (image_width - 1) + x * width_scale
                    : 0.5 * (x1 + x2) * (image_width - 1);
                if (in_x < 0 || in_x > image_width - 1)
                {
                    continue;
                }

                const int left_x_index = floorf(in_x);
                const int right_x_index = ceilf(in_x);
                const float x_lerp = in_x - left_x_index;

                for (int d = 0; d < depth; ++d)
                {
                    // Split grad_val across the four neighboring pixels
                    // using the forward pass's bilinear weights.
                    float *pimage = grads_image_data + b_in * image_elements + d * image_channel_elements;
                    const float grad_val = grads_data[crop_elements * b + channel_elements * d + y * crop_width + x];

                    const float dtop = (1 - y_lerp) * grad_val;
                    pimage[top_y_index * image_width + left_x_index] += (1 - x_lerp) * dtop;
                    pimage[top_y_index * image_width + right_x_index] += x_lerp * dtop;

                    const float dbottom = y_lerp * grad_val;
                    pimage[bottom_y_index * image_width + left_x_index] += (1 - x_lerp) * dbottom;
                    pimage[bottom_y_index * image_width + right_x_index] += x_lerp * dbottom;
                }   // end d
            }   // end x
        }   // end y
    }   // end b
}
|
timer.h | #ifndef SPLATT_TIMER_H
#define SPLATT_TIMER_H
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include <time.h>
#include <stddef.h>
#include <stdbool.h>
#ifdef __MACH__
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
/******************************************************************************
* STRUCTURES
*****************************************************************************/
/**
* @brief Represents a wall-clock timer.
*/
typedef struct
{
bool running;
double seconds;
double start;
double stop;
} sp_timer_t;
/**
* @brief timer_id provides easy indexing into timers[].
*/
typedef enum
{
TIMER_LVL0, /* LEVEL 0 */
TIMER_ALL,
TIMER_CPD,
TIMER_REORDER,
TIMER_CONVERT,
TIMER_LVL1, /* LEVEL 1 */
TIMER_MTTKRP,
TIMER_ADMM,
TIMER_CHOLESKY,
TIMER_BACKSOLVE,
TIMER_INV,
TIMER_FIT,
TIMER_MATMUL,
TIMER_ATA,
TIMER_MATNORM,
TIMER_IO,
TIMER_PART,
TIMER_LVL2, /* LEVEL 2 */
#ifdef SPLATT_USE_MPI
TIMER_MPI,
TIMER_MPI_IDLE,
TIMER_MPI_COMM,
TIMER_MPI_ATA,
TIMER_MPI_REDUCE,
TIMER_MPI_PARTIALS,
TIMER_MPI_NORM,
TIMER_MPI_UPDATE,
TIMER_MPI_FIT,
/* timer max */
TIMER_MTTKRP_MAX,
TIMER_MPI_MAX,
TIMER_MPI_IDLE_MAX,
TIMER_MPI_COMM_MAX,
#endif
TIMER_SPLATT,
TIMER_GIGA,
TIMER_DFACTO,
TIMER_TTBOX,
TIMER_SORT,
TIMER_TILE,
TIMER_MISC,
TIMER_NTIMERS /* LEVEL N */
} timer_id;
/* globals */
extern int timer_lvl;
extern sp_timer_t timers[TIMER_NTIMERS];
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
#define init_timers splatt_init_timers
/**
* @brief Call timer_reset() on all of timers[].
*/
void init_timers(void);
#define report_times splatt_report_times
/**
* @brief Output a summary of all used timers.
*/
void report_times(void);
#define timer_inc_verbose splatt_timer_inc_verbose
/**
* @brief Increment timer verbosity to the next level;
*/
void timer_inc_verbose(void);
/**
* @brief Return the number of seconds since an unspecified time (e.g., Unix
*        epoch). This is accomplished with a high-resolution monotonic timer,
*        suitable for performance timing.
*
* @return The number of seconds.
*/
static inline double monotonic_seconds()
{
#ifdef __MACH__
  /* OSX */
  static mach_timebase_info_data_t info;
  static double seconds_per_unit;
  if(seconds_per_unit == 0) {
    #pragma omp critical
    {
      mach_timebase_info(&info);
      /* BUG FIX: numer and denom are integers, so the original
       * `info.numer / info.denom` performed truncating integer division
       * (e.g. 125/3 -> 41 instead of 41.67), silently skewing every
       * timing on Mach. Promote to double before dividing. */
      seconds_per_unit = ((double) info.numer / (double) info.denom) / 1e9;
    }
  }
  return seconds_per_unit * mach_absolute_time();
#else
  /* Linux systems: CLOCK_MONOTONIC is immune to wall-clock adjustments */
  struct timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return ts.tv_sec + ts.tv_nsec * 1e-9;
#endif
}
/**
* @brief Zero out a sp_timer_t so it can be reused from scratch.
*
* @param timer The timer to reset.
*/
static inline void timer_reset(sp_timer_t * const timer)
{
  timer->seconds = 0;
  timer->start   = 0;
  timer->stop    = 0;
  timer->running = false;
}
/**
* @brief Start a sp_timer_t. NOTE: this does not reset the timer, and a
*        timer that is already running is left untouched.
*
* @param timer The timer to start.
*/
static inline void timer_start(sp_timer_t * const timer)
{
  if(timer->running) {
    /* already ticking -- keep the original start point */
    return;
  }
  timer->running = true;
  timer->start = monotonic_seconds();
}
/**
* @brief Stop a sp_timer_t and add the elapsed interval to its total.
*
* @param timer The timer to stop.
*/
static inline void timer_stop(sp_timer_t * const timer)
{
  const double now = monotonic_seconds();
  timer->stop = now;
  timer->seconds += now - timer->start;
  timer->running = false;
}
/**
* @brief Give a sp_timer_t a fresh start: discard any accumulated time and
*        begin timing immediately (equivalent to timer_reset + timer_start).
*
* @param timer The timer to refresh.
*/
static inline void timer_fstart(sp_timer_t * const timer)
{
  timer->seconds = 0;
  timer->stop    = 0;
  timer->running = true;
  timer->start   = monotonic_seconds();
}
#endif
|
axpy_practice.c | /*
* AXPY Y[N] = Y[N] + a*X[N]
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/timeb.h>
#include <omp.h>
/* Wall-clock time in seconds (millisecond resolution via ftime). */
double read_timer() {
    struct timeb now;
    ftime(&now);
    return ((double) now.millitm) / 1000.0 + (double) now.time;
}
/* Wall-clock time in milliseconds (millisecond resolution via ftime). */
double read_timer_ms() {
    struct timeb now;
    ftime(&now);
    return 1000.0 * (double) now.time + (double) now.millitm;
}
#define REAL float
#define VECTOR_LENGTH 102400
/* Fill A[0..N-1] with pseudo-random values from drand48 (range [0, 1)). */
void init(REAL A[], int N) {
    for (int idx = 0; idx < N; idx++) {
        A[idx] = (double) drand48();
    }
}
/* Sum of absolute element-wise differences between A and B; 0 means the
 * two vectors agree exactly. */
double check(REAL A[], REAL B[], int N) {
    double total = 0.0;
    for (int idx = 0; idx < N; idx++) {
        total += fabs(A[idx] - B[idx]);
    }
    return total;
}
void axpy_base(int N, REAL Y[], REAL X[], REAL a);
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a);
int main(int argc, char *argv[]) {
int N = VECTOR_LENGTH;
double elapsed; /* for timing */
double elapsed_omp; /* for timing */
if (argc < 2) {
fprintf(stderr, "Usage: axpy <n> \n");
exit(1);
}
N = atoi(argv[1]);
REAL a = 123.456;
REAL Y_base[N];
REAL Y_omp[N];
REAL X[N];
srand48((1 << 12));
init(X, N);
init(Y_base, N);
memcpy(Y_omp, Y_base, N * sizeof(REAL));
/* example run */
elapsed = read_timer();
axpy_base(N, Y_base, X, a);
elapsed = (read_timer() - elapsed);
elapsed_omp = read_timer();
axpy_omp_parallel(N, Y_omp, X, a);
elapsed_omp = (read_timer() - elapsed_omp);
/* you should add the call to each function and time the execution */
printf("\tAXPY: Y[N] = Y[N] + a*X[N], N=%d\n", N);
printf("-----------------------------------------------------------\n");
printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
printf("-----------------------------------------------------------\n");
printf("axpy_base:\t\t%4f\t%4f \t\t%g\n", elapsed * 1.0e3, (2.0 * N) / (1.0e6 * elapsed), check(Y_base, Y_base, N));
printf("axpy_omp:\t\t%4f\t%4f \t\t%g\n", elapsed_omp * 1.0e3, (2.0 * N) / (1.0e6 * elapsed_omp), check(Y_base, Y_omp, N));
return 0;
}
/* Serial reference kernel: Y[i] = Y[i] + a * X[i] for every i in [0, N). */
void axpy_base(int N, REAL Y[], REAL X[], REAL a) {
    for (int idx = 0; idx < N; idx++) {
        Y[idx] = Y[idx] + a * X[idx];
    }
}
#define USE_CONSTRUCT
/* use openMP */
/* OpenMP AXPY kernel: Y[i] += a * X[i].
 * Two alternative parallelizations are kept behind USE_CONSTRUCT:
 *  - defined (current setting): the standard `omp for` worksharing
 *    construct splits the iteration space automatically;
 *  - undefined: manual partitioning, where each thread derives its own
 *    contiguous [istart, iend) slice from its thread id. */
void axpy_omp_parallel(int N, REAL Y[], REAL X[], REAL a)
{
#ifdef USE_CONSTRUCT
    int i;
    /* parallel region + for-worksharing directive; `i` is explicitly
     * privatized (the loop variable of an omp for would be private by
     * default anyway, so this is belt-and-braces) */
    #pragma omp parallel
    #pragma omp for private(i)
    for (i = 0; i < N; i++)
    {
        Y[i] += a * X[i];
    }
#else
    int Nthrds;
    #pragma omp parallel shared(X, Y)
    {
        int id, i, istart, iend;
        /* one thread records the team size; the implicit barrier at the
         * end of `single` guarantees Nthrds is visible to all threads
         * before the slice computation below */
        #pragma omp single
        {
            Nthrds = omp_get_num_threads();
        };
        id = omp_get_thread_num();
        /* thread id owns iterations [id*N/Nthrds, (id+1)*N/Nthrds) */
        istart = id * N / Nthrds;
        iend = (id + 1) * N / Nthrds;
        for (i = istart; i < iend; i++)
        {
            Y[i] += a * X[i];
        }
        /* #pragma omp barrier
        printf("Done from thread %d\n", id);*/
    }
#endif
}
|
SectionsSectionBodyLink.c | int main() {
#pragma omp sections
{
}
#pragma omp sections
{
#pragma omp section
{
int x;
}
}
#pragma omp sections
{
#pragma omp section
{
100;
}
#pragma omp section
{
101;
}
}
#pragma omp sections
{
#pragma omp section
{
103;
}
}
#pragma omp sections
{
#pragma omp section
{
int x;
}
#pragma omp section
{
105;
}
#pragma omp section
{
int x;
}
}
#pragma omp sections
{
#pragma omp section
{
int x;
}
#pragma omp section
{
int x;
}
}
}
|
GB_unaryop__lnot_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_bool_int8
// op(A') function: GB_tran__lnot_bool_int8
// C type: bool
// A type: int8_t
// cast: bool cij = (bool) aij
// unaryop: cij = !aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !((bool) Ax [p]) for p in [0, anz): apply the logical-NOT unary
// operator to an int8 array, casting each entry to bool, producing a bool
// array. The loop body is the generated GB_CAST_OP macro (get, cast, op).
GrB_Info GB_unop__lnot_bool_int8
(
    bool *restrict Cx,          // output array, anz entries
    const int8_t *restrict Ax,  // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; caller falls back
    // to the generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is independent
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = !((bool) A'): transpose A, cast each int8 entry to bool, and apply
// logical NOT. The actual loop lives in the textually-included template
// GB_unaryop_transpose.c, specialized here by the GB_* macros above;
// GB_PHASE_2_OF_2 selects the numerical (second) phase of the transpose.
GrB_Info GB_tran__lnot_bool_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,     // per-slice row counts from phase 1
    GBI_single_iterator Iter,         // iterator over A's vectors
    const int64_t *restrict A_slice,  // how A is partitioned across slices
    int naslice                       // number of slices (parallelism)
)
{
    #if GB_DISABLE
    // compiled out; caller uses the generic kernel instead
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
};
enum { NumStmtBits = 8 };
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
/// The location of the declaration name itself.
SourceLocation Loc;
};
enum APFloatSemantics {
IEEEhalf,
IEEEsingle,
IEEEdouble,
x87DoubleExtended,
IEEEquad,
PPCDoubleDouble
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated token this string is made of.
/// This is the number of trailing SourceLocation.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 3;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// value of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 3;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
// All of the bitfield classes above overlay this single anonymous union,
// so every Stmt pays for only one set of bits. Each class begins with an
// unnamed bitfield that skips its base class's bits, so derived-class flags
// never collide with base-class flags. The active member is implied by
// StmtBits.sClass (see getStmtClass()).
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
// Raw placement new: construct into caller-provided storage.
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
// Matching placement deletes. These are only invoked by the compiler if a
// constructor throws during a placement new; they are intentionally no-ops
// because the ASTContext allocator does not free individual nodes.
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only Expr *
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
struct ExprIterator
: llvm::iterator_adaptor_base<ExprIterator, Stmt **,
std::random_access_iterator_tag, Expr *> {
ExprIterator() : iterator_adaptor_base(nullptr) {}
ExprIterator(Stmt **I) : iterator_adaptor_base(I) {}
reference operator*() const {
// Verify (in asserting builds) that the pointee really is an Expr
// before the reinterpret_cast below reinterprets the storage.
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<Expr **>(I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only Expr *
struct ConstExprIterator
: llvm::iterator_adaptor_base<ConstExprIterator, const Stmt *const *,
std::random_access_iterator_tag,
const Expr *const> {
ConstExprIterator() : iterator_adaptor_base(nullptr) {}
ConstExprIterator(const Stmt *const *I) : iterator_adaptor_base(I) {}
reference operator*() const {
// Same class-range sanity check as the mutable iterator above.
assert((*I)->getStmtClass() >= firstExprConstant &&
(*I)->getStmtClass() <= lastExprConstant);
return *reinterpret_cast<const Expr *const *>(I);
}
};
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt(StmtClass SC) {
// Keep Stmt itself exactly one 64-bit word: the whole state is the
// bitfield union. Growing any bitfield class past 8 bytes trips this.
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
// Optional global statistics, toggled via EnableStatistics().
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
/// Get a human-readable name for this statement class, e.g. "IfStmt".
const char *getStmtClassName() const;
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip past any implicit AST nodes which might surround this
/// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
Stmt *IgnoreImplicit();
// Const overload forwards to the non-const implementation.
const Stmt *IgnoreImplicit() const {
return const_cast<Stmt *>(this)->IgnoreImplicit();
}
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
/// Strip off any label-like statements (labels, case/default) wrapping
/// this statement; the non-const overload forwards to the const one.
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpessions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
// The declaration group this statement wraps; may hold one or many Decls.
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
// children() iterates the decl group via DeclGroupRef iterators wrapped in
// child_iterator, so (per StmtIterator's contract) initializers inside the
// declarations are reachable from generic AST walks.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
// Both the flag and the semicolon location live in NullStmtBits, so this
// node needs no data members of its own.
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
// Presumably true when the ';' directly follows an empty macro expansion;
// confirm against the callers that construct NullStmt.
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
// A null statement spans exactly the semicolon token.
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
// No children: return an empty range.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
// The body statements live in trailing storage (see TrailingObjects base);
// NumStmts is kept in CompoundStmtBits.
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
// front/back return nullptr on an empty body rather than asserting.
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
void setLastStmt(Stmt *S) {
assert(!body_empty() && "setLastStmt");
body_begin()[size() - 1] = S;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
// The cases of a switch form a singly linked list threaded through
// NextSwitchCase.
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
// Defined out of line below, after CaseStmt/DefaultStmt are complete.
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
// A SwitchCase is exactly a CaseStmt or a DefaultStmt.
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allow ranges in cases statement of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
// 2 mandatory Stmt* (LHS + substmt), plus one for the optional RHS.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
// Offsets into the trailing Stmt* array; rhs/subStmt shift by one when the
// optional RHS slot is present.
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
// getRHS returns nullptr for an ordinary (non-range) case.
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// Represents the 'default' label of a switch; unlike CaseStmt it has no
/// optional parts, so the substatement is a plain member.
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
// NOTE(review): SubStmt is left uninitialized here; presumably the
// deserializer always fills it in before use -- confirm.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
};
// Out-of-line definition (declared inline in SwitchCase): needs CaseStmt and
// DefaultStmt to be complete so it can dispatch on the dynamic type.
SourceLocation SwitchCase::getEndLoc() const {
if (const auto *CS = dyn_cast<CaseStmt>(this))
return CS->getEndLoc();
else if (const auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getEndLoc();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
// Out-of-line definition (declared inline in SwitchCase): dispatches to the
// concrete subclass's getSubStmt once both subclasses are complete.
Stmt *SwitchCase::getSubStmt() {
if (auto *CS = dyn_cast<CaseStmt>(this))
return CS->getSubStmt();
else if (auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public Stmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
// The identifier location is stored in LabelStmtBits rather than a member.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
// The label spans from its identifier to the end of its substatement.
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public Stmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
// The attributes are stored in trailing storage; their count and the
// attribute location live in AttributedStmtBits.
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: Stmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
// Empty-shell constructor: zero the attribute slots so the node is in a
// defined state until the deserializer fills them in.
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: Stmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which optional.
// Note that it would be more convenient to put the optional trailing
// objects at then end but this would change the order of the children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
// Two mandatory Stmt* slots (cond + then) plus one per optional part.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
// Offsets into the trailing Stmt* array; each optional leading slot shifts
// everything after it by one.
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
// getElse returns nullptr when no else storage was allocated.
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
// The "else" keyword location shares trailing storage and exists only when
// hasElseStorage().
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
                         private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;
/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;
// SwitchStmt is followed by several trailing objects,
// some of which optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
//    Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
//    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
//    Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
//    Always present.
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
// Total number of trailing "Stmt *": condition + body, plus the two
// optional slots when present.
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}
// Offsets of each (possibly absent) slot within the trailing "Stmt *"
// array; callers must check the corresponding has*Storage() first.
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);
/// Build a empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
                          Expr *Cond);
/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
                               bool HasVar);
/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
/// The controlling expression; stored as a "Stmt *" trailing object.
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// The init statement, or null when there is no storage for one.
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                        : nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
                        : nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
       "This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}
/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
                             getTrailingObjects<Stmt *>()[varOffset()])
                       : nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
                             getTrailingObjects<Stmt *>()[varOffset()])
                       : nullptr;
}
/// Head of the intrusive linked list of case/default statements.
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}
/// Prepend a case/default statement to the intrusive list; SC must not
/// already belong to another switch.
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
       "case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}
SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
                 : reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
                   getTrailingObjects<Stmt *>() +
                       numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
                        private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;
// WhileStmt is followed by several trailing objects,
// some of which optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
//    Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
//    Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
//    Always present.
//
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
// Offsets of each slot in the trailing "Stmt *" array; the variable slot
// is only valid when hasVarStorage().
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
          SourceLocation WL);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
                         Stmt *Body, SourceLocation WL);
/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }
/// The loop condition; stored as a "Stmt *" trailing object.
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
///   // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}
/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
                             getTrailingObjects<Stmt *>()[varOffset()])
                       : nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
                             getTrailingObjects<Stmt *>()[varOffset()])
                       : nullptr;
}
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
                   getTrailingObjects<Stmt *>() +
                       numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
// Fixed-size child array: body first, then the condition (no optional
// slots, unlike WhileStmt).
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
       SourceLocation RP)
    : Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
/// The loop condition; stored as a "Stmt *" in SubExprs.
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
// Fixed-size child array; CONDVAR holds the faux DeclStmt for a condition
// variable (or null), and all slots except BODY may be null.
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
        Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
        SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
///   // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
// Note: cond/inc/init accessors may return null (see class comment).
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc()  const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
    : Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}
/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
/// The label this goto jumps to.
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators: a goto references a label but owns no sub-statements.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
// The target address expression; stored as a "Stmt *" (it is an Expr).
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
    : Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
    : Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators: the single child is the target expression.
child_range children() { return child_range(&Target, &Target + 1); }
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators: a continue statement has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators: a break statement has no children.
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
///   return;
///   return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void.  We explicitly model this in the AST, which means you can't
/// depend on the return type of the function and the presence of an argument.
class ReturnStmt final
    : public Stmt,
      private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
                          const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
/// The returned expression, or null for a bare "return;".
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
                          : nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
       "This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators: at most one child, the returned expression.
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
// Operand expressions, laid out as NumOutputs outputs followed by
// NumInputs inputs (see the iterator arithmetic below).
Stmt **Exprs = nullptr;
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
        unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
    : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
      NumOutputs(numoutputs), NumInputs(numinputs),
      NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
// Base-class locations are invalid; derived classes provide the real ones.
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand.  All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint.  Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
       T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators: inputs occupy Exprs[NumOutputs .. NumOutputs+NumInputs).
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators: outputs occupy Exprs[0 .. NumOutputs).
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
// Constraints and Names are indexed with outputs first, then inputs
// (see the "+ NumOutputs" offsets in the input accessors below).
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
           bool isvolatile, unsigned numoutputs, unsigned numinputs,
           IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
           StringLiteral *asmstr, unsigned numclobbers,
           StringLiteral **clobbers, SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below).  An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String,  // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand  // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
               SourceLocation End)
    : MyKind(Operand), Str(S), OperandNo(OpNo),
      Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present.  This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces.  If the asm string is erroneous, emit errors and return
/// true, otherwise return false.  This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
                          const ASTContext &C, unsigned &DiagOffs) const;
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
/// Symbolic name of the i'th output operand, or an empty StringRef if it
/// has none.
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
/// Symbolic name of the i'th input operand, or an empty StringRef if it
/// has none.
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
                                    IdentifierInfo **Names,
                                    StringLiteral **Constraints,
                                    Stmt **Exprs,
                                    unsigned NumOutputs,
                                    unsigned NumInputs,
                                    StringLiteral **Clobbers,
                                    unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
// Constraints is indexed outputs first, then inputs (see the
// "+ NumOutputs" offset in getInputConstraint below).
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
          SourceLocation lbraceloc, bool issimple, bool isvolatile,
          ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
          ArrayRef<StringRef> constraints,
          ArrayRef<Expr*> exprs, StringRef asmstr,
          ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
/// True when this was written as "__asm { ... }"; detected by a valid
/// left-brace location.
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
                          NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
                ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
                ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
/// Represents a Structured Exception Handling '__except' handler: a filter
/// expression paired with the handler's compound-statement body.
class SEHExceptStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc;          // location of the '__except' keyword
  Stmt *Children[2];           // filter expression and handler block

  enum { FILTER_EXPR, BLOCK }; // indices into Children

  SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);

  /// Build an empty __except statement.
  explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}

public:
  static SEHExceptStmt* Create(const ASTContext &C,
                               SourceLocation ExceptLoc,
                               Expr *FilterExpr,
                               Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }

  SourceLocation getExceptLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }

  /// The controlling filter expression of the __except clause.
  Expr *getFilterExpr() const {
    return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
  }

  /// The handler body; always a CompoundStmt.
  CompoundStmt *getBlock() const {
    return cast<CompoundStmt>(Children[BLOCK]);
  }

  child_range children() {
    return child_range(Children, Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHExceptStmtClass;
  }
};
/// Represents a Structured Exception Handling '__finally' handler: a
/// compound-statement body attached to a __try statement.
class SEHFinallyStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  SourceLocation Loc; // location of the '__finally' keyword
  Stmt *Block;        // handler body

  SEHFinallyStmt(SourceLocation Loc, Stmt *Block);

  /// Build an empty __finally statement.
  explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}

public:
  static SEHFinallyStmt* Create(const ASTContext &C,
                                SourceLocation FinallyLoc,
                                Stmt *Block);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }

  SourceLocation getFinallyLoc() const { return Loc; }
  SourceLocation getEndLoc() const { return Block->getEndLoc(); }

  /// The handler body; always a CompoundStmt.
  CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }

  child_range children() {
    return child_range(&Block,&Block+1);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHFinallyStmtClass;
  }
};
/// Represents a Structured Exception Handling try statement: a guarded
/// block plus exactly one handler (__except or __finally).
class SEHTryStmt : public Stmt {
  friend class ASTReader;
  friend class ASTStmtReader;

  bool IsCXXTry;             // spelled 'try' rather than '__try'
  SourceLocation TryLoc;
  Stmt *Children[2];         // guarded block and handler

  enum { TRY = 0, HANDLER = 1 };

  SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
             SourceLocation TryLoc,
             Stmt *TryBlock,
             Stmt *Handler);

  /// Build an empty SEH try statement.
  explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}

public:
  static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
                            SourceLocation TryLoc, Stmt *TryBlock,
                            Stmt *Handler);

  SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }

  SourceLocation getTryLoc() const { return TryLoc; }
  SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }

  bool getIsCXXTry() const { return IsCXXTry; }

  /// The guarded block; always a CompoundStmt.
  CompoundStmt* getTryBlock() const {
    return cast<CompoundStmt>(Children[TRY]);
  }

  /// The handler: a SEHExceptStmt or a SEHFinallyStmt (see the typed
  /// accessors below).
  Stmt *getHandler() const { return Children[HANDLER]; }

  /// Returns 0 if not defined
  SEHExceptStmt *getExceptHandler() const;
  SEHFinallyStmt *getFinallyHandler() const;

  child_range children() {
    return child_range(Children, Children+2);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SEHTryStmtClass;
  }
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
///   compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
  /// The different capture forms: by 'this', by reference, capture for
  /// variable-length array type etc.
  enum VariableCaptureKind {
    VCK_This,
    VCK_ByRef,
    VCK_ByCopy,
    VCK_VLAType,
  };

  /// Describes the capture of either a variable, or 'this', or
  /// variable-length array type.
  class Capture {
    // Variable being captured (null for 'this'/VLA captures) plus the
    // capture kind packed into the low pointer bits.
    llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
    SourceLocation Loc;

  public:
    friend class ASTStmtReader;

    /// Create a new capture.
    ///
    /// \param Loc The source location associated with this capture.
    ///
    /// \param Kind The kind of capture (this, ByRef, ...).
    ///
    /// \param Var The variable being captured, or null if capturing this.
    Capture(SourceLocation Loc, VariableCaptureKind Kind,
            VarDecl *Var = nullptr);

    /// Determine the kind of capture.
    VariableCaptureKind getCaptureKind() const;

    /// Retrieve the source location at which the variable or 'this' was
    /// first used.
    SourceLocation getLocation() const { return Loc; }

    /// Determine whether this capture handles the C++ 'this' pointer.
    bool capturesThis() const { return getCaptureKind() == VCK_This; }

    /// Determine whether this capture handles a variable (by reference).
    bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }

    /// Determine whether this capture handles a variable by copy.
    bool capturesVariableByCopy() const {
      return getCaptureKind() == VCK_ByCopy;
    }

    /// Determine whether this capture handles a variable-length array
    /// type.
    bool capturesVariableArrayType() const {
      return getCaptureKind() == VCK_VLAType;
    }

    /// Retrieve the declaration of the variable being captured.
    ///
    /// This operation is only valid if this capture captures a variable.
    VarDecl *getCapturedVar() const;
  };

private:
  /// The number of variables captured, including 'this'.
  unsigned NumCaptures;

  /// The pointer part is the implicitly-generated outlined function and the
  /// int part is the captured region kind, 'CR_Default' etc.
  llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;

  /// The record for captured variables, a RecordDecl or CXXRecordDecl.
  RecordDecl *TheRecordDecl = nullptr;

  /// Construct a captured statement.
  CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
               ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);

  /// Construct an empty captured statement.
  CapturedStmt(EmptyShell Empty, unsigned NumCaptures);

  // Statements are stored in memory immediately after the object:
  // NumCaptures capture-initializer expressions followed by the captured
  // statement itself (see capture_init_begin/getCapturedStmt).
  Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }

  Stmt *const *getStoredStmts() const {
    return reinterpret_cast<Stmt *const *>(this + 1);
  }

  Capture *getStoredCaptures() const;

  // The captured statement lives after the NumCaptures initializers.
  void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }

public:
  friend class ASTStmtReader;

  static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
                              CapturedRegionKind Kind,
                              ArrayRef<Capture> Captures,
                              ArrayRef<Expr *> CaptureInits,
                              CapturedDecl *CD, RecordDecl *RD);

  static CapturedStmt *CreateDeserialized(const ASTContext &Context,
                                          unsigned NumCaptures);

  /// Retrieve the statement being captured.
  Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
  const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }

  /// Retrieve the outlined function declaration.
  CapturedDecl *getCapturedDecl();
  const CapturedDecl *getCapturedDecl() const;

  /// Set the outlined function declaration.
  void setCapturedDecl(CapturedDecl *D);

  /// Retrieve the captured region kind.
  CapturedRegionKind getCapturedRegionKind() const;

  /// Set the captured region kind.
  void setCapturedRegionKind(CapturedRegionKind Kind);

  /// Retrieve the record declaration for captured variables.
  const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }

  /// Set the record declaration for captured variables.
  void setCapturedRecordDecl(RecordDecl *D) {
    assert(D && "null RecordDecl");
    TheRecordDecl = D;
  }

  /// True if this variable has been captured.
  bool capturesVariable(const VarDecl *Var) const;

  /// An iterator that walks over the captures.
  using capture_iterator = Capture *;
  using const_capture_iterator = const Capture *;
  using capture_range = llvm::iterator_range<capture_iterator>;
  using capture_const_range = llvm::iterator_range<const_capture_iterator>;

  capture_range captures() {
    return capture_range(capture_begin(), capture_end());
  }
  capture_const_range captures() const {
    return capture_const_range(capture_begin(), capture_end());
  }

  /// Retrieve an iterator pointing to the first capture.
  capture_iterator capture_begin() { return getStoredCaptures(); }
  const_capture_iterator capture_begin() const { return getStoredCaptures(); }

  /// Retrieve an iterator pointing past the end of the sequence of
  /// captures.
  capture_iterator capture_end() const {
    return getStoredCaptures() + NumCaptures;
  }

  /// Retrieve the number of captures, including 'this'.
  unsigned capture_size() const { return NumCaptures; }

  /// Iterator that walks over the capture initialization arguments.
  using capture_init_iterator = Expr **;
  using capture_init_range = llvm::iterator_range<capture_init_iterator>;

  /// Const iterator that walks over the capture initialization
  /// arguments.
  using const_capture_init_iterator = Expr *const *;
  using const_capture_init_range =
      llvm::iterator_range<const_capture_init_iterator>;

  capture_init_range capture_inits() {
    return capture_init_range(capture_init_begin(), capture_init_end());
  }
  const_capture_init_range capture_inits() const {
    return const_capture_init_range(capture_init_begin(), capture_init_end());
  }

  /// Retrieve the first initialization argument.
  capture_init_iterator capture_init_begin() {
    return reinterpret_cast<Expr **>(getStoredStmts());
  }
  const_capture_init_iterator capture_init_begin() const {
    return reinterpret_cast<Expr *const *>(getStoredStmts());
  }

  /// Retrieve the iterator pointing one past the last initialization
  /// argument.
  capture_init_iterator capture_init_end() {
    return capture_init_begin() + NumCaptures;
  }
  const_capture_init_iterator capture_init_end() const {
    return capture_init_begin() + NumCaptures;
  }

  // Source range is entirely delegated to the captured statement.
  SourceLocation getBeginLoc() const LLVM_READONLY {
    return getCapturedStmt()->getBeginLoc();
  }
  SourceLocation getEndLoc() const LLVM_READONLY {
    return getCapturedStmt()->getEndLoc();
  }
  SourceRange getSourceRange() const LLVM_READONLY {
    return getCapturedStmt()->getSourceRange();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CapturedStmtClass;
  }

  child_range children();
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H