source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
compute_bk.h | #ifndef _COMPUTE_BK_AMA_H
#define _COMPUTE_BK_AMA_H
CPS_START_NAMESPACE
//We compute B_K for a given first kaon timeslice (t0) and a fixed K->K separation for each operator insertion time. The sink kaon timeslice is t1 = (t0 + tsep) % Lt
//The matrix is indexed as [t0][(top-t0+Lt)%Lt]
//Source momenta of the strange quark props are needed for the flavor projection.
//It is assumed that the total kaon momentum is zero, and we project onto zero momentum at the operator insertion
//Perform the G-parity B_K contractions for a kaon source at t0 and its partner at t1.
//Parameters:
//  into            : output matrix; written as into(t0, (top-t0+Lt)%Lt) for each operator insertion time top
//  t0, t1          : global timeslices of the source and sink kaons
//  prop_h_t0/t1    : heavy (strange) quark propagators sourced at t0/t1
//  prop_l_t0/t1    : light quark propagators sourced at t0/t1
//  p_psi_h_t0/t1   : source momenta of the strange-quark props, used to build the flavor projectors
//  do_flav_project : if true (default) apply the G-parity flavor projector at each kaon vertex
void GparityBK(fMatrix<Rcomplex> &into, const int t0, const int t1,
               const PropSiteMatrixGetter &prop_h_t0, const PropSiteMatrixGetter &prop_l_t0, const ThreeMomentum &p_psi_h_t0,
               const PropSiteMatrixGetter &prop_h_t1, const PropSiteMatrixGetter &prop_l_t1, const ThreeMomentum &p_psi_h_t1,
               const bool do_flav_project = true
               ){
    const int Lt = GJP.TnodeSites()*GJP.Tnodes(); //global time extent
    const int nthread = omp_get_max_threads();
    //per-thread accumulators, one row of Lt complex numbers per thread
    basicComplexArray<Rcomplex> tmp(Lt,nthread); //defaults to zero for all elements
    //flavor projectors are fixed by the strange-quark source momenta
    FlavorMatrix kaon_proj_t0 = getProjector(p_psi_h_t0);
    FlavorMatrix kaon_proj_t1 = getProjector(p_psi_h_t1);
    int vol3d = GJP.VolNodeSites()/GJP.TnodeSites(); //local 3d volume per timeslice
#pragma omp parallel for
    for(int x=0;x<GJP.VolNodeSites();x++){
        //decompose the linear local site index x into 4d lattice coordinates
        int pos[4];
        int rem = x;
        for(int i=0;i<4;i++){ pos[i] = rem % GJP.NodeSites(i); rem /= GJP.NodeSites(i); }
        //NOTE(review): x3d_lcl = x % vol3d assumes the time coordinate is the
        //slowest-varying index in the linearization above — consistent with pos[3] being time
        int x3d_lcl = x % vol3d;
        int t_glb = pos[3] + GJP.TnodeCoor() * GJP.TnodeSites(); //operator insertion time
        int tdis0_glb = t_glb - t0; //linear time coordinate relative to the t0 kaon (may be negative)
        int tdis1_glb = t_glb - t1;
        int tdis_into = (tdis0_glb + Lt)% Lt; //output time coordinate modulo Lt
        //build L(x,t0) * H^dag(x,t0), optionally flavor-projected at the kaon
        SpinColorFlavorMatrix prop_l_t0_site;
        prop_l_t0.siteMatrix(prop_l_t0_site,x3d_lcl,tdis0_glb);
        if(do_flav_project) prop_l_t0_site *= kaon_proj_t0;
        SpinColorFlavorMatrix prop_h_dag_t0_site;
        prop_h_t0.siteMatrix(prop_h_dag_t0_site,x3d_lcl,tdis0_glb);
        prop_h_dag_t0_site.hconj(); //hermitian conjugate of the heavy-quark propagator
        SpinColorFlavorMatrix prop_prod_t0 = prop_l_t0_site * prop_h_dag_t0_site;
        //same combination for the t1 kaon
        SpinColorFlavorMatrix prop_l_t1_site;
        prop_l_t1.siteMatrix(prop_l_t1_site,x3d_lcl,tdis1_glb);
        if(do_flav_project) prop_l_t1_site *= kaon_proj_t1;
        SpinColorFlavorMatrix prop_h_dag_t1_site;
        prop_h_t1.siteMatrix(prop_h_dag_t1_site,x3d_lcl,tdis1_glb);
        prop_h_dag_t1_site.hconj();
        SpinColorFlavorMatrix prop_prod_t1 = prop_l_t1_site * prop_h_dag_t1_site;
        //sum over the four-quark operator's Dirac structures
        for(int mu=0;mu<4;mu++){
            for(int Gamma = 0; Gamma < 2; Gamma++){ //\gamma^\mu and \gamma^\mu\gamma^5
                SpinColorFlavorMatrix part1 = prop_prod_t0;
                if(Gamma == 1) part1.gl(-5); //left-multiply by gamma^5
                part1.gl(mu);                //then by gamma^mu
                part1.pr(F0);                //project onto flavor component F0
                SpinColorFlavorMatrix part2 = prop_prod_t1;
                if(Gamma == 1) part2.gl(-5);
                part2.gl(mu);
                part2.pr(F0);
                //double-trace and single-trace contractions with relative factor -1
                tmp(tdis_into, omp_get_thread_num()) += 2.0*Trace(part1)*Trace(part2);
                tmp(tdis_into, omp_get_thread_num()) += -2.0*Trace(part1, part2);
            }
        }
    }
    tmp.threadSum(); //combine per-thread accumulators
    tmp.nodeSum();   //global sum over nodes
    for(int tdis=0;tdis<Lt;tdis++)
        into(t0, tdis) = tmp[tdis];
}
//Standard (non G-parity) B_K contractions for a kaon source at t0 and its partner at t1.
//Parameters:
//  into         : output matrix; written as into(t0, (top-t0+Lt)%Lt) for each operator insertion time top
//  t0, t1       : global timeslices of the source and sink kaons
//  prop_h_t0/t1 : heavy (strange) quark propagators sourced at t0/t1
//  prop_l_t0/t1 : light quark propagators sourced at t0/t1
void StandardBK(fMatrix<Rcomplex> &into, const int t0, const int t1,
                const PropSiteMatrixGetter &prop_h_t0, const PropSiteMatrixGetter &prop_l_t0,
                const PropSiteMatrixGetter &prop_h_t1, const PropSiteMatrixGetter &prop_l_t1){
    const int Lt = GJP.TnodeSites()*GJP.Tnodes(); //global time extent
    const int nthread = omp_get_max_threads();
    //per-thread accumulators, one row of Lt complex numbers per thread
    basicComplexArray<Rcomplex> tmp(Lt,nthread); //defaults to zero for all elements
    int vol3d = GJP.VolNodeSites()/GJP.TnodeSites(); //local 3d volume per timeslice
    //BUGFIX: was "#pragma omp_parallel for", which is not a valid OpenMP
    //directive; unknown pragmas are silently ignored, so this loop ran
    //single-threaded (cf. the correct directive in GparityBK above).
#pragma omp parallel for
    for(int x=0;x<GJP.VolNodeSites();x++){
        //decompose the linear local site index x into 4d lattice coordinates
        int pos[4];
        int rem = x;
        for(int i=0;i<4;i++){ pos[i] = rem % GJP.NodeSites(i); rem /= GJP.NodeSites(i); }
        int x3d_lcl = x % vol3d;
        int t_glb = pos[3] + GJP.TnodeCoor() * GJP.TnodeSites(); //operator insertion time
        int tdis0_glb = t_glb - t0; //linear time coordinate relative to the t0 kaon
        int tdis1_glb = t_glb - t1;
        int tdis_into = (tdis0_glb + Lt) % Lt; //output time coordinate modulo Lt
        //build L(x,t0) * H^dag(x,t0)
        WilsonMatrix prop_l_t0_site;
        prop_l_t0.siteMatrix(prop_l_t0_site,x3d_lcl,tdis0_glb);
        WilsonMatrix prop_h_dag_t0_site;
        prop_h_t0.siteMatrix(prop_h_dag_t0_site,x3d_lcl,tdis0_glb);
        prop_h_dag_t0_site.hconj(); //hermitian conjugate of the heavy-quark propagator
        WilsonMatrix prop_prod_t0 = prop_l_t0_site * prop_h_dag_t0_site;
        //same combination for the t1 kaon
        WilsonMatrix prop_l_t1_site;
        prop_l_t1.siteMatrix(prop_l_t1_site,x3d_lcl,tdis1_glb);
        WilsonMatrix prop_h_dag_t1_site;
        prop_h_t1.siteMatrix(prop_h_dag_t1_site,x3d_lcl,tdis1_glb);
        prop_h_dag_t1_site.hconj();
        WilsonMatrix prop_prod_t1 = prop_l_t1_site * prop_h_dag_t1_site;
        //sum over the four-quark operator's Dirac structures
        for(int mu=0;mu<4;mu++){
            for(int Gamma = 0; Gamma < 2; Gamma++){ //\gamma^\mu and \gamma^\mu\gamma^5
                WilsonMatrix part1 = prop_prod_t0;
                if(Gamma == 1) part1.gl(-5); //left-multiply by gamma^5
                part1.gl(mu);                //then by gamma^mu
                WilsonMatrix part2 = prop_prod_t1;
                if(Gamma == 1) part2.gl(-5);
                part2.gl(mu);
                //double-trace and single-trace contractions with relative factor -1
                tmp(tdis_into, omp_get_thread_num()) += 2.0*Trace(part1)*Trace(part2);
                tmp(tdis_into, omp_get_thread_num()) += -2.0*Trace(part1, part2);
            }
        }
    }
    tmp.threadSum(); //combine per-thread accumulators
    tmp.nodeSum();   //global sum over nodes
    for(int tdis=0;tdis<Lt;tdis++)
        into(t0, tdis) = tmp[tdis];
}
//Compute the sink-kaon timeslice t1 = t0 + tsep together with the time
//boundary-condition status under which the sink propagator must be evaluated,
//wrapping t1 back into [0,Lt) when it crosses the time boundary.
inline void getBKsnkPropBcAndWrapperTsnk(TbcStatus &time_bc_t1, int &t1, const TbcStatus &time_bc_t0, const int t0, const int tsep){
    const int Lt = GJP.Tnodes()*GJP.TnodeSites();
    t1 = t0 + tsep;
    time_bc_t1 = time_bc_t0;
    if(t1 >= Lt){
        //The sink timeslice lies beyond the boundary and must be mapped back
        if(time_bc_t0.isCombinedType()){
            //Combined F/B propagator: use F(t+Lt) = B(t) and B(t+Lt) = F(t)
            t1 -= Lt;
            time_bc_t1.swapTbcCombination();
        }else if(time_bc_t0.getSingleBc() == BND_CND_APRD){
            //G(t-Lt) = -G(t): the minus sign would need to be passed into the contraction
            ERR.General("","getBKsnkPropBcAndWrapperTsnk","- sign from tsnk prop crossing boundary not implemented yet\n");
        }else if(time_bc_t0.getSingleBc() == BND_CND_PRD){
            //Periodic BCs: simple wrap-around
            t1 -= Lt;
        }
    }
    assert(t1>=0 && t1<Lt);
}
CPS_END_NAMESPACE
#endif
|
GB_transpose.c | //------------------------------------------------------------------------------
// GB_transpose: C=A' or C=op(A'), with typecasting
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// CALLS: GB_builder
// Transpose a matrix, C=A', and optionally apply a unary operator and/or
// typecast the values. The transpose may be done in place, in which case C or
// A are modified in place. If the matrix to be transposed has more than one
// vector, it may have jumbled indices in its vectors, which must be sorted.
// If the input matrix has a single vector, it must be already sorted on input.
// The input matrix may have shallow components (even if in place), and the
// output may also have shallow components (even if the input matrix is not
// shallow).
// This function is CSR/CSC agnostic; it sets the output matrix format from
// C_is_csc but otherwise ignores the CSR/CSC type of A and C.
// If A_in is NULL, then C = (*Chandle) is transposed in place. If out of
// memory, (*Chandle) is always returned as NULL, which frees the input matrix
// C if the transpose is done in place.
// If A_in is not NULL and Chandle is NULL, then A is modified in place, and
// the A_in matrix is not freed when done.
// The bucket sort is parallel, but not highly scalable. If e=nnz(A) and A is
// m-by-n, then at most O(e/n) threads are used. For many matrices, e is O(n),
// although the constant can be high. The qsort method is more scalable, but
// not as fast with a modest number of threads.
#include "GB_transpose.h"
#include "GB_build.h"
#include "GB_apply.h"
// free the workspace used by GB_transpose (the Count array, allocated only
// for the parallel avlen==1 case)
#define GB_FREE_WORK \
{ \
GB_FREE (Count) ; \
} \

// free prior content of A, if transpose is done in place
#define GB_FREE_IN_PLACE_A \
{ \
if (in_place) \
{ \
/* A is being transposed in place */ \
/* free prior content of A but not &A itself */ \
if (!Ap_shallow) GB_FREE (Ap) ; \
if (!Ah_shallow) GB_FREE (Ah) ; \
if (!Ai_shallow) GB_FREE (Ai) ; \
if (!Ax_shallow) GB_FREE (Ax) ; \
} \
else \
{ \
/* A is not modified; it is purely an input matrix */ \
; \
} \
}

// free the new C matrix, unless C=A' is being done in place of A
#define GB_FREE_C \
{ \
if (!in_place_A) \
{ \
/* free all of C and all its contents &C */ \
GB_MATRIX_FREE (Chandle) ; \
} \
}

// free both A (if in place) and C (if not in place of A)
#define GB_FREE_A_AND_C \
{ \
GB_FREE_IN_PLACE_A ; \
GB_FREE_C ; \
}
//------------------------------------------------------------------------------
// GB_transpose
//------------------------------------------------------------------------------
GB_PUBLIC // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_transpose // C=A', C=(ctype)A or C=op(A')
(
GrB_Matrix *Chandle, // output matrix C, possibly modified in place
GrB_Type ctype, // desired type of C; if NULL use A->type.
// ignored if op is present (cast to op->ztype)
const bool C_is_csc, // desired CSR/CSC format of C
const GrB_Matrix A_in, // input matrix
// no operator is applied if both op1 and op2 are NULL
const GrB_UnaryOp op1_in, // unary operator to apply
const GrB_BinaryOp op2_in, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,A) else binop(A,y)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs and determine if transpose is done in place
//--------------------------------------------------------------------------
bool in_place_C, in_place_A ;
GrB_Matrix A, C ;
if (A_in == NULL)
{
//----------------------------------------------------------------------
// C = C' ; &C is transposed in place
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, NULL, op) ;
// C=A' is transposed in place, in the matrix C.
// The matrix C is freed if an error occurs and C is set to NULL.
ASSERT (Chandle != NULL) ; // at least &C or A must be non-NULL
A = (*Chandle) ;
C = A ; // C must be freed if an error occurs
in_place_C = true ; // C is modified in place
in_place_A = false ;
ASSERT (A == C && A == (*Chandle)) ;
}
else if (Chandle == NULL || (*Chandle) == A_in)
{
//----------------------------------------------------------------------
// A = A' ; A is transposed in place; reuse the header of A
//----------------------------------------------------------------------
// GB_transpose (NULL, ctype, csc, A, op) ;
// GB_transpose (&A, ctype, csc, A, op) ;
// C=A' is transposed in place, in the matrix A.
// The matrix A_in is not freed if an error occurs.
A = A_in ;
Chandle = &A ; // C must not be freed if an error occurs
C = A ;
in_place_C = false ;
in_place_A = true ; // A is modified in place
ASSERT (A == C && A == (*Chandle)) ;
}
else
{
//----------------------------------------------------------------------
// C = A' ; C and A are different
//----------------------------------------------------------------------
// GB_transpose (&C, ctype, csc, A, op) ;
// C and A are both non-NULL, and not aliased.
// C=A' where C is a new matrix constructed here.
// The matrix C is freed if an error occurs, and C is set to NULL.
A = A_in ;
C = NULL ;
(*Chandle) = NULL ; // C must be allocated; freed on error
in_place_C = false ; // C and A are different matrices
in_place_A = false ;
ASSERT (A != C && A != (*Chandle)) ;
}
bool in_place = (in_place_A || in_place_C) ;
ASSERT_MATRIX_OK_OR_JUMBLED (A, "A input for GB_transpose", GB0) ;
ASSERT_TYPE_OK_OR_NULL (ctype, "ctype for GB_transpose", GB0) ;
ASSERT_UNARYOP_OK_OR_NULL (op1_in, "unop for GB_transpose", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (op2_in, "binop for GB_transpose", GB0) ;
ASSERT_SCALAR_OK_OR_NULL (scalar, "scalar for GB_transpose", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use here
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
int64_t anvec = A->nvec ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Type atype = A->type ;
size_t asize = atype->size ;
GB_Type_code acode = atype->code ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
int64_t aplen = A->plen ;
bool A_is_hyper = A->is_hyper ;
double A_hyper_ratio = A->hyper_ratio ;
int64_t anzmax = A->nzmax ;
// if in place, these must be freed when done, whether successful or not
int64_t *GB_RESTRICT Ap = A->p ;
int64_t *GB_RESTRICT Ah = A->h ;
int64_t *GB_RESTRICT Ai = A->i ;
GB_void *GB_RESTRICT Ax = (GB_void *) A->x ;
bool Ap_shallow = A->p_shallow ;
bool Ah_shallow = A->h_shallow ;
bool Ai_shallow = A->i_shallow ;
bool Ax_shallow = A->x_shallow ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
int nth = GB_nthreads (avdim, chunk, nthreads_max) ;
int ntasks = (nth == 1) ? 1 : (8 * nth) ;
ntasks = GB_IMIN (ntasks, avdim) ;
ntasks = GB_IMAX (ntasks, 1) ;
int64_t *GB_RESTRICT Count = NULL ; // size ntasks+1, if allocated
if (anz > 0 && avdim != 1 && avlen == 1)
{
// Count is only used in one case below
Count = GB_CALLOC (ntasks+1, int64_t) ;
if (Count == NULL)
{
// out of memory
GB_FREE_C ;
return (GB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// determine the type of C and get the unary or binary operator
//--------------------------------------------------------------------------
// If a unary or binary operator is present, C is always returned as
// the ztype of the operator. The input ctype is ignored.
GrB_UnaryOp op1 = NULL ;
GrB_BinaryOp op2 = NULL ;
if (op1_in != NULL)
{
// get the unary operator
if (atype == op1_in->xtype && op1_in->opcode == GB_IDENTITY_opcode)
{
// op1 is a built-in identity operator, with the same type as A, so
// do not apply the operator and do not typecast.
ctype = atype ;
}
else
{
// apply the operator, z=op1(x)
op1 = op1_in ;
ctype = op1->ztype ;
}
}
else if (op2_in != NULL)
{
// get the binary operator
GrB_Type op2_intype = binop_bind1st ? op2_in->xtype : op2_in->ytype ;
GB_Opcode opcode = op2_in->opcode ;
// only GB_apply calls GB_transpose with op2_in, and it ensures this
// condition holds: the first(A,y), second(x,A), and any(...) have
// been renamed to identity(A), so these cases do not occur here.
ASSERT (!
((opcode == GB_ANY_opcode) ||
(opcode == GB_FIRST_opcode && !binop_bind1st) ||
(opcode == GB_SECOND_opcode && binop_bind1st))) ;
// apply the operator, z=op2(A,y) or op2(x,A)
op2 = op2_in ;
ctype = op2->ztype ;
}
else
{
// no operator
if (ctype == NULL)
{
// no typecasting if ctype is NULL
ctype = atype ;
}
}
GB_Type_code ccode = ctype->code ;
size_t csize = ctype->size ;
//--------------------------------------------------------------------------
// C = A'
//--------------------------------------------------------------------------
ASSERT (GB_IMPLIES (avlen == 0 || avdim == 0, anz == 0)) ;
bool allocate_new_Cx = (ctype != atype) || (op1 != NULL) || (op2 != NULL) ;
if (anz == 0)
{
//======================================================================
// quick return if A is empty
//======================================================================
GB_FREE_IN_PLACE_A ;
// A is empty; create a new empty matrix C, with the new type and
// dimensions. C is hypersparse for now but may convert when
// returned.
info = GB_create (Chandle, ctype, avdim, avlen, GB_Ap_calloc,
C_is_csc, GB_FORCE_HYPER, A_hyper_ratio, 1, 1, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "C transpose empty", GB0) ;
}
else if (avdim == 1)
{
//======================================================================
// transpose a "column" vector into a "row"
//======================================================================
// transpose a vector (avlen-by-1) into a "row" matrix (1-by-avlen).
// A must be already sorted on input
ASSERT_MATRIX_OK (A, "the vector A must already be sorted", GB0) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, ctype, 1, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *GB_RESTRICT Cx = NULL ;
int64_t *GB_RESTRICT Cp = GB_MALLOC (anz+1, int64_t) ;
int64_t *GB_RESTRICT Ci = GB_CALLOC (anz , int64_t) ;
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
}
if (Cp == NULL || Ci == NULL || (allocate_new_Cx && (Cx == NULL)))
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// the transpose will now succeed; fill the content of C
//----------------------------------------------------------------------
// numerical values: apply the operator, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op ((GB_void *) Cx,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A.
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
// each entry in A becomes a non-empty vector in C
C->h = Ai ; C->h_shallow = (in_place) ? Ai_shallow : true ;
Ai = NULL ; // do not free prior Ai
C->nzmax = anz ;
// C->p = 0:anz and C->i = zeros (1,anz), newly allocated
C->plen = anz ;
C->nvec = anz ;
C->nvec_nonempty = anz ;
C->i = Ci ; C->i_shallow = false ;
C->p = Cp ; C->p_shallow = false ;
// fill the vector pointers C->p
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k <= anz ; k++)
{
Cp [k] = k ;
}
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else if (avlen == 1)
{
//======================================================================
// transpose a "row" into a "column" vector
//======================================================================
// transpose a "row" matrix (1-by-avdim) into a vector (avdim-by-1).
// if A->vlen is 1, all vectors of A are implicitly sorted
ASSERT_MATRIX_OK (A, "1-by-n input A already sorted", GB0) ;
//----------------------------------------------------------------------
// allocate space
//----------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is NON-hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse existing
info = GB_new (Chandle, ctype, avdim, 1, GB_Ap_null, C_is_csc,
GB_FORCE_NONHYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// allocate new space for the values and pattern
GB_void *GB_RESTRICT Cx = NULL ;
int64_t *GB_RESTRICT Cp ;
int64_t *GB_RESTRICT Ci = NULL ;
Cp = GB_CALLOC (2, int64_t) ;
bool allocate_new_Ci = (!A_is_hyper) ;
if (allocate_new_Ci)
{
// A is not hypersparse, so new space is needed for Ci
Ci = GB_MALLOC (anz, int64_t) ;
}
if (allocate_new_Cx)
{
// allocate new space for the new typecasted numerical values of C
Cx = GB_MALLOC (anz * ctype->size, GB_void) ;
}
if (Cp == NULL || (allocate_new_Cx && (Cx == NULL))
|| (allocate_new_Ci && (Ci == NULL)))
{
// out of memory
GB_FREE (Cp) ;
GB_FREE (Ci) ;
GB_FREE (Cx) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// numerical values of C: apply the op, typecast, or make shallow copy
//----------------------------------------------------------------------
if (op1 != NULL || op2 != NULL)
{
// Cx = op ((op->xtype) Ax)
C->x = Cx ; C->x_shallow = false ;
GB_apply_op ((GB_void *) Cx,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// prior Ax will be freed
}
else if (ctype != atype)
{
// copy the values from A into C and cast from atype to ctype
C->x = Cx ; C->x_shallow = false ;
GB_cast_array (Cx, ccode, Ax, acode, asize, anz, 1) ;
// prior Ax will be freed
}
else // ctype == atype
{
// no type change; numerical values of C are a shallow copy of A
C->x = Ax ; C->x_shallow = (in_place) ? Ax_shallow : true ;
Ax = NULL ; // do not free prior Ax
}
//----------------------------------------------------------------------
// pattern of C
//----------------------------------------------------------------------
if (A_is_hyper)
{
//------------------------------------------------------------------
// each non-empty vector in A becomes an entry in C
//------------------------------------------------------------------
ASSERT (!allocate_new_Ci) ;
C->i = Ah ; C->i_shallow = (in_place) ? Ah_shallow : true ;
ASSERT (anvec == anz) ;
Ah = NULL ; // do not free prior Ah
}
else
{
//------------------------------------------------------------------
// find the non-empty vectors of A, which become entries in C
//------------------------------------------------------------------
ASSERT (allocate_new_Ci) ;
ASSERT (Ah == NULL) ;
if (nth == 1)
{
//--------------------------------------------------------------
// construct Ci with a single thread
//--------------------------------------------------------------
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
ASSERT (k == anz) ;
}
else
{
//--------------------------------------------------------------
// construct Ci in parallel
//--------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = 0 ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
k++ ;
}
}
Count [tid] = k ;
}
GB_cumsum (Count, ntasks, NULL, 1) ;
ASSERT (Count [ntasks] == anz) ;
#pragma omp parallel for num_threads(nth) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
int64_t jstart, jend, k = Count [tid] ;
GB_PARTITION (jstart, jend, avdim, tid, ntasks) ;
for (int64_t j = jstart ; j < jend ; j++)
{
if (Ap [j] < Ap [j+1])
{
Ci [k++] = j ;
}
}
}
}
#ifdef GB_DEBUG
int64_t k = 0 ;
for (int64_t j = 0 ; j < avdim ; j++)
{
if (Ap [j] < Ap [j+1])
{
ASSERT (Ci [k] == j) ;
k++ ;
}
}
ASSERT (k == anz) ;
#endif
C->i = Ci ; C->i_shallow = false ;
}
//----------------------------------------------------------------------
// vector pointers of C
//----------------------------------------------------------------------
C->nzmax = anz ;
// C->p = [0 anz] and C->h = NULL
ASSERT (C->plen == 1) ;
ASSERT (C->nvec == 1) ;
ASSERT (C->h == NULL) ;
C->p = Cp ; C->p_shallow = false ;
C->nvec_nonempty = (anz == 0) ? 0 : 1 ;
// fill the vector pointers C->p
Cp [0] = 0 ;
Cp [1] = anz ;
C->magic = GB_MAGIC ;
//----------------------------------------------------------------------
// free prior space
//----------------------------------------------------------------------
GB_FREE_IN_PLACE_A ;
}
else
{
//======================================================================
// transpose a general matrix
//======================================================================
ASSERT_MATRIX_OK_OR_JUMBLED (A, "A GB_transpose jumbled ok", GB0) ;
ASSERT (avdim > 1 && avlen > 1) ;
// T=A' with optional typecasting, or T=op(A')
//----------------------------------------------------------------------
// select the method
//----------------------------------------------------------------------
// for the qsort method, if the transpose is done in place and A->i is
// not shallow, A->i can be used and then freed. Otherwise, A->i is
// not modified at all.
bool recycle_Ai = (in_place && !Ai_shallow) ;
bool use_qsort ;
if (A_is_hyper)
{
//------------------------------------------------------------------
// always use qsort for hypersparse matrices
//------------------------------------------------------------------
use_qsort = true ;
}
else
{
//------------------------------------------------------------------
// select qsort if the transpose will likely be hypersparse
//------------------------------------------------------------------
use_qsort = GB_CHOOSE_QSORT_INSTEAD_OF_BUCKET (anz, avlen) ;
}
//----------------------------------------------------------------------
// transpose the matrix with the selected method
//----------------------------------------------------------------------
if (use_qsort)
{
//==================================================================
// transpose via quicksort
//==================================================================
//------------------------------------------------------------------
// allocate and create iwork
//------------------------------------------------------------------
// allocate iwork of size anz
int64_t *iwork = GB_MALLOC (anz, int64_t) ;
if (iwork == NULL)
{
// out of memory
GB_FREE_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
// Construct the "row" indices of C, which are "column" indices of
// A. This array becomes the permanent T->i on output. This phase
// must be done before Chandle is created below, since that step
// destroys A.
GB_extract_vector_list (iwork, A, nthreads) ;
//------------------------------------------------------------------
// allocate the output matrix and additional space (jwork and S)
//------------------------------------------------------------------
// Allocate the header of C, with no C->p, C->h, C->i, or C->x
// content, and initialize the type and dimension of C. If in
// place, A->p, A->h, A->i, and A->x are all NULL. The new matrix
// is hypersparse, but can be CSR or CSC. This step does not
// allocate anything if in place.
// if *Chandle == NULL, allocate a new header; otherwise reuse
info = GB_new (Chandle, ctype, avdim, avlen, GB_Ap_null, C_is_csc,
GB_FORCE_HYPER, A_hyper_ratio, 0, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
ASSERT (!in_place) ; // cannot fail if in place
GB_FREE (iwork) ;
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
if (!in_place)
{
C = (*Chandle) ;
}
else
{
ASSERT (A == C && A == (*Chandle)) ;
}
// if in_place, the prior Ap and Ah can now be freed
if (in_place)
{
if (!Ap_shallow) GB_FREE (Ap) ;
if (!Ah_shallow) GB_FREE (Ah) ;
}
int64_t *jwork = NULL ;
GB_Type_code scode ;
GB_void *S = NULL ;
GB_void *Swork = NULL ;
bool ok = true ;
if (!recycle_Ai)
{
// allocate jwork of size anz
jwork = GB_MALLOC (anz, int64_t) ;
ok = ok && (jwork != NULL) ;
}
if (op1 != NULL || op2 != NULL)
{
// allocate Swork of size anz * csize
Swork = GB_MALLOC (anz * csize, GB_void) ;
ok = ok && (Swork != NULL) ;
}
if (!ok)
{
// out of memory
GB_FREE (iwork) ;
GB_FREE (jwork) ;
GB_FREE (Swork) ;
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (GB_OUT_OF_MEMORY) ;
}
//------------------------------------------------------------------
// construct jwork and Swork
//------------------------------------------------------------------
// "row" indices of A become "column" indices of C
if (recycle_Ai)
{
// Ai is used as workspace for the "column" indices of C.
// jwork is a shallow copy of Ai, and is freed by GB_builder.
jwork = Ai ;
ASSERT (in_place) ;
// set Ai to NULL so it is not freed by GB_FREE_IN_PLACE_A
Ai = NULL ;
}
else
{
// jwork = Ai, making a deep copy. jwork is freed by
// GB_builder. A->i is not modified, even if out of memory.
GB_memcpy (jwork, Ai, anz * sizeof (int64_t), nthreads) ;
}
// numerical values: apply the op, typecast, or make shallow copy
if (op1 != NULL || op2 != NULL)
{
// Swork = op ((op->xtype) Ax)
GB_apply_op ((GB_void *) Swork,
op1, op2, scalar, binop_bind1st,
(const GB_void *) Ax, atype, anz, Context) ;
// GB_builder will not need to typecast Swork to T->x, and it
// may choose to transplant it into T->x
scode = ccode ;
#if 0
if (in_place && !Ax_shallow)
{
// A is being transposed in place so A->x is no longer
// needed. If A->x is shallow this can be skipped. T->x
// will not be shallow if the op is present. A->x should
// be freed early to free up space for GB_builder.
// However, in the current usage, when op is used, A is not
// transposed in place, so this step is not needed.
ASSERT (GB_DEAD_CODE) ;
GB_FREE (Ax) ;
}
#endif
}
else
{
// GB_builder will typecast S from atype to ctype if needed.
// S is a shallow copy of Ax, and must not be modified.
S = Ax ;
scode = acode ;
}
//------------------------------------------------------------------
// build the matrix: T = (ctype) A' or op ((xtype) A')
//------------------------------------------------------------------
// internally, jwork is freed and then T->x is allocated, so the
// total high-water memory usage is anz * max (csize,
// sizeof(int64_t)). T is always hypersparse.
// If op is not NULL, then Swork can be transplanted into T in
// GB_builder, instead. However, this requires the tuples to be
// sorted on input, which is possible but rare for GB_transpose.
GrB_Matrix T ;
info = GB_builder
(
&T, // create T
ctype, // T is of type ctype
avdim, // T->vlen = A->vdim, always > 1
avlen, // T->vdim = A->vlen, always > 1
C_is_csc, // T has the same CSR/CSC format as C
&iwork, // iwork_handle, becomes T->i on output
&jwork, // jwork_handle, freed on output
&Swork, // Swork_handle, freed on output
false, // tuples are not sorted on input
true, // tuples have no duplicates
anz, // size of iwork, jwork, and Swork
true, // is_matrix: unused
false, // ijcheck: unused
NULL, NULL, // original I,J indices: not used here
S, // array of values of type scode, not modified
anz, // number of tuples
NULL, // no dup operator needed (input has no duplicates)
scode, // type of S or Swork
Context
) ;
// GB_builder always frees jwork, and either frees iwork or
// transplants it in to T->i and sets iwork to NULL. So iwork and
// jwork are always NULL on output. GB_builder does not modify S.
ASSERT (iwork == NULL && jwork == NULL && Swork == NULL) ;
//------------------------------------------------------------------
// free prior space and transplant T into C
//------------------------------------------------------------------
// Free the prior content of the input matrix, if done in place.
// Ap, Ah, and Ai have already been freed, but Ax has not.
GB_FREE_IN_PLACE_A ;
if (info != GrB_SUCCESS)
{
// out of memory in GB_builder
GB_FREE_A_AND_C ;
GB_FREE_WORK ;
return (info) ;
}
// Transplant T in to the result C. The matrix T is not shallow
// and no typecasting is done, so this will always succeed.
info = GB_transplant (*Chandle, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
//==================================================================
// transpose via bucket sort
//==================================================================
// This method does not operate on the matrix in place, so it must
// create a temporary matrix T. Then the input matrix is freed and
// replaced with the new matrix T.
ASSERT (!A_is_hyper) ;
// T is also typecasted to ctype, if not NULL
GrB_Matrix T ;
info = GB_transpose_bucket (&T, ctype, C_is_csc, A,
op1, op2, scalar, binop_bind1st,
Context) ;
// free prior content, if C=A' is being done in place
if (in_place_A)
{
// free all content of A, but not the header, if in place of A
GB_PHIX_FREE (A) ; // transpose in-place
}
else if (in_place_C)
{
// free all of C, including the header, if done in place of C
GB_MATRIX_FREE (Chandle) ;
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_transpose_bucket
GB_FREE_C ;
GB_FREE_WORK ;
return (info) ;
}
ASSERT_MATRIX_OK (T, "T from bucket", GB0) ;
if (in_place_A)
{
// The header of A has not been freed, since it is used for the
// output. Transplant T back into A and free T. T is not
// shallow and no typecast is done so this will always succeed.
info = GB_transplant (A, ctype, &T, Context) ;
ASSERT (info == GrB_SUCCESS) ;
}
else
{
// If C=A' is done in place of C, then the header and content
// of the input C has been freed. The output T can now be
// moved to the Chandle.
ASSERT (*Chandle == NULL) ;
(*Chandle) = T ;
}
}
}
//--------------------------------------------------------------------------
// free workspace
//--------------------------------------------------------------------------
GB_FREE_WORK ;
//--------------------------------------------------------------------------
// conform the result to the desired hypersparsity of A
//--------------------------------------------------------------------------
// get the output matrix
C = (*Chandle) ;
// transplant the hyper_ratio from A to C
C->hyper_ratio = A_hyper_ratio ;
ASSERT_MATRIX_OK (C, "C to conform in GB_transpose", GB0) ;
info = GB_to_hyper_conform (C, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_C ;
return (info) ;
}
ASSERT_MATRIX_OK (*Chandle, "Chandle conformed in GB_transpose", GB0) ;
return (GrB_SUCCESS) ;
}
|
parallel_region.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
#include <omp.h>
int main()
{
  // Shared counter incremented by both threads; the atomic update forces
  // both threads to do real work before reaching the implicit barrier.
  int x = 0;
  //implicit barrier at end of a parallel region
  #pragma omp parallel num_threads(2)
  {
    #pragma omp atomic
    x++;
  }
  // Records the address just after the parallel region so FileCheck can
  // fuzzy-match it against the codeptr_ra reported by the OMPT callbacks.
  print_fuzzy_address();
  // NOTE: the CHECK lines below are FileCheck directives, not ordinary
  // comments; they define the expected OMPT event trace and must not change.
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region'
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_sync_region_wait'
  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // master thread implicit barrier at parallel end
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS:0x[0-f]+]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[RETURN_ADDRESS]]{{[0-f][0-f]}}
  // CHECK: {{^}}[[MASTER_ID]]: fuzzy_address={{.*}}[[RETURN_ADDRESS]]
  // worker thread implicit barrier at parallel end
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_begin: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_wait_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id={{[0-9]+}}, codeptr_ra=[[NULL]]
  return 0;
}
|
GB_binop__ne_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ne_uint8
// A.*B function (eWiseMult): GB_AemultB__ne_uint8
// A*D function (colscale): GB_AxD__ne_uint8
// D*A function (rowscale): GB_DxB__ne_uint8
// C+=B function (dense accum): GB_Cdense_accumB__ne_uint8
// C+=b function (dense accum): GB_Cdense_accumb__ne_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ne_uint8
// C=scalar+B GB_bind1st__ne_uint8
// C=scalar+B' GB_bind1st_tran__ne_uint8
// C=A+scalar GB_bind2nd__ne_uint8
// C=A'+scalar GB_bind2nd_tran__ne_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_UINT8 || GxB_NO_NE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by the GB_BINOP macro (cij = aij != bij).
GrB_Info GB_Cdense_ewise3_noaccum__ne_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // operator/type disabled at compile time; caller uses the generic method
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse accumulated into dense).  The template is disabled (#if 0)
// for this operator by the code generator, so this is effectively a no-op
// that reports success; kept for a uniform per-operator interface.
GrB_Info GB_Cdense_accumB__ne_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense).  The template is disabled (#if 0)
// for this operator by the code generator, so this is effectively a no-op
// that reports success; kept for a uniform per-operator interface.
GrB_Info GB_Cdense_accumb__ne_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #if 0
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the loop is in
// the included meta-template, writing through the typed Cx pointer below.
GrB_Info GB_AxD__ne_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // C has type bool for the NE operator, regardless of A's uint8_t type
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the loop is in
// the included meta-template, writing through the typed Cx pointer below.
GrB_Info GB_DxB__ne_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // C has type bool for the NE operator, regardless of B's uint8_t type
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd C = A+B (optionally masked): the numerical phase is performed by
// the included template; the slice arrays below are workspace that the
// template may allocate and GB_FREE_ALL releases.
GrB_Info GB_AaddB__ne_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek-slice workspace, NULL until (possibly) allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): the numerical phase is performed
// by the included template; the slice arrays below are workspace that the
// template may allocate and GB_FREE_ALL releases.
GrB_Info GB_AemultB__ne_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek-slice workspace, NULL until (possibly) allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x != Bx [k]) for every entry of B present per the bitmap Bb.
GrB_Info GB_bind1st__ne_uint8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t xval = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Bb, k))
        {
            Cx [k] = (xval != Bx [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] != y) for every entry of A present per the bitmap Ab.
GrB_Info GB_bind2nd__ne_uint8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    bool *Cx = (bool *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t yval = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Ab, k))
        {
            Cx [k] = (Ax [k] != yval) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A while applying cij = (x != aij) via the
// GB_CAST_OP macro defined just above; the loop is in the included template.
GrB_Info GB_bind1st_tran__ne_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // generator artifact: restore GB_ATYPE for the remainder of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A while applying cij = (aij != y) via the
// GB_CAST_OP macro defined just above; the loop is in the included template.
GrB_Info GB_bind2nd_tran__ne_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__lxor_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__lxor_uint8
// A.*B function (eWiseMult): GB_AemultB__lxor_uint8
// A*D function (colscale): GB_AxD__lxor_uint8
// D*A function (rowscale): GB_DxB__lxor_uint8
// C+=B function (dense accum): GB_Cdense_accumB__lxor_uint8
// C+=b function (dense accum): GB_Cdense_accumb__lxor_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__lxor_uint8
// C=scalar+B GB_bind1st__lxor_uint8
// C=scalar+B' GB_bind1st_tran__lxor_uint8
// C=A+scalar GB_bind2nd__lxor_uint8
// C=A'+scalar GB_bind2nd_tran__lxor_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) != (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT8 || GxB_NO_LXOR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by GB_BINOP (cij = (aij!=0) != (bij!=0)).
GrB_Info GB_Cdense_ewise3_noaccum__lxor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
#if GB_DISABLE
    // operator/type disabled at compile time; caller uses the generic method
    return (GrB_NO_VALUE) ;
#else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C using the
// LXOR operator; the loop is in the included subassign template.
GrB_Info GB_Cdense_accumB__lxor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C using the LXOR
// operator; the loop is in the included subassign template.
GrB_Info GB_Cdense_accumb__lxor_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; emitted by the code generator for a uniform shape
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D; the loop is in
// the included meta-template, writing through the typed Cx pointer below.
GrB_Info GB_AxD__lxor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // for LXOR, C has the same uint8_t type as A and D
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D; the loop is in
// the included meta-template, writing through the typed Cx pointer below.
GrB_Info GB_DxB__lxor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // for LXOR, C has the same uint8_t type as D and B
    uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd C = A+B (optionally masked): the numerical phase is performed by
// the included template; the slice arrays below are workspace that the
// template may allocate and GB_FREE_ALL releases.
GrB_Info GB_AaddB__lxor_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek-slice workspace, NULL until (possibly) allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): the numerical phase is performed
// by the included template; the slice arrays below are workspace that the
// template may allocate and GB_FREE_ALL releases.
GrB_Info GB_AemultB__lxor_uint8
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // ek-slice workspace, NULL until (possibly) allocated inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = ((x != 0) LXOR (Bx [k] != 0)) for every entry present per Bb.
GrB_Info GB_bind1st__lxor_uint8
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    uint8_t xval = (*((uint8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Bb, k))
        {
            uint8_t b_k = Bx [k] ;
            Cx [k] = ((xval != 0) != (b_k != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = ((Ax [k] != 0) LXOR (y != 0)) for every entry present per Ab.
GrB_Info GB_bind2nd__lxor_uint8
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t yval = (*((uint8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where the bitmap marks an entry present
        if (GBB (Ab, k))
        {
            uint8_t a_k = Ax [k] ;
            Cx [k] = ((a_k != 0) != (yval != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying cij = ((x!=0) != (aij!=0)) via
// the GB_CAST_OP macro defined just above; the loop is in the template.
GrB_Info GB_bind1st_tran__lxor_uint8
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // generator artifact: restore GB_ATYPE for the remainder of the file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
// C = op (A', y): transpose A while applying cij = ((aij!=0) != (y!=0)) via
// the GB_CAST_OP macro defined just above; the loop is in the template.
GrB_Info GB_bind2nd_tran__lxor_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
findpath.c | /* gcc -fopenmp -g3 -DTEST_FINDPATH findpath.c -o FINDpath -lRNA -lm -I../ -L./ */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include "ViennaRNA/datastructures/basic.h"
#include "ViennaRNA/model.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/fold.h"
#include "ViennaRNA/cofold.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/utils/structures.h"
#include "ViennaRNA/landscape/findpath.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#define LOOP_EN
/**
* @brief
*/
/* a single base-pair move along a refolding path */
typedef struct move {
  int i; /* i,j>0 insert; i,j<0 delete */
  int j;
  int when; /* 0 if still available, else resulting distance from start */
  int E;   /* energy after this move; divided by 100.0 when reported in a route */
} move_t;
/**
* @brief
*/
/* a partially refolded structure kept during the breadth-limited search */
typedef struct intermediate {
  short *pt;     /**< @brief pair table */
  int   Sen;     /**< @brief saddle energy so far */
  int   curr_en; /**< @brief current energy */
  move_t *moves; /**< @brief remaining moves to target */
} intermediate_t;
/*
#################################
# GLOBAL VARIABLES #
#################################
*/
/*
#################################
# PRIVATE VARIABLES #
#################################
*/
PRIVATE int BP_dist;
PRIVATE move_t *path = NULL;
PRIVATE int path_fwd; /* 1: s1->s2, else s2 -> s1 */
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;
#ifdef _OPENMP
/* NOTE: all variables are assumed to be uninitialized if they are declared as threadprivate
*/
#pragma omp threadprivate(BP_dist, path, path_fwd, backward_compat_compound)
#endif
/*
#################################
# PRIVATE FUNCTION DECLARATIONS #
#################################
*/
PRIVATE move_t *
copy_moves(move_t *mvs);
PRIVATE int
compare_ptable(const void *A,
const void *B);
PRIVATE int
compare_energy(const void *A,
const void *B);
PRIVATE int
compare_moves_when(const void *A,
const void *B);
PRIVATE void
free_intermediate(intermediate_t *i);
#ifdef TEST_FINDPATH
/* TEST_FINDPATH, COFOLD */
PRIVATE void
usage(void);
#endif
PRIVATE int
find_path_once(vrna_fold_compound_t *vc,
const char *s1,
const char *s2,
int maxl,
int maxE);
PRIVATE int
try_moves(vrna_fold_compound_t *vc,
intermediate_t c,
int maxE,
intermediate_t *next,
int dist);
/*
#################################
# BEGIN OF FUNCTION DEFINITIONS #
#################################
*/
/* Release a route returned by get_path()/vrna_path_findpath(): free each
 * structure string up to the NULL sentinel, then the array itself. */
PUBLIC void
free_path(vrna_path_t *path)
{
  vrna_path_t *entry;

  if (path == NULL)
    return;

  for (entry = path; entry->s; entry++)
    free(entry->s);

  free(path);
}
/* Backward-compatibility wrapper: find the saddle energy between s1 and s2,
 * re-using the cached fold compound when sequence and model are unchanged. */
PUBLIC int
find_saddle(const char *seq,
            const char *s1,
            const char *s2,
            int        width)
{
  vrna_fold_compound_t *vc = NULL;
  vrna_md_t            md;

  set_model_details(&md);

  /* try to re-use the fold compound cached by a previous call */
  if (backward_compat_compound &&
      (strcmp(seq, backward_compat_compound->sequence) == 0)) {
    md.window_size = backward_compat_compound->length;
    md.max_bp_span = backward_compat_compound->length;
    /* only re-use it if the model details also match */
    if (memcmp(&md, &(backward_compat_compound->params->model_details),
               sizeof(vrna_md_t)) == 0)
      vc = backward_compat_compound;
  }

  /* cache miss: build a fresh fold compound and cache it for next time */
  if (vc == NULL) {
    char *sequence;
    vrna_fold_compound_free(backward_compat_compound);
    sequence                 = vrna_cut_point_insert(seq, cut_point);
    backward_compat_compound = vc =
      vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY);
    free(sequence);
  }

  return vrna_path_findpath_saddle(vc, s1, s2, width);
}
/* Convenience wrapper: saddle search with no upper energy bound
 * (INT_MAX - 1 effectively disables the bound). */
PUBLIC int
vrna_path_findpath_saddle(vrna_fold_compound_t *vc,
                          const char           *s1,
                          const char           *s2,
                          int                  width)
{
  return vrna_path_findpath_saddle_ub(vc, s1, s2, width, INT_MAX - 1);
}
/**
 * @brief Find a refolding path s1 <-> s2 whose saddle energy is below maxE.
 *
 * The search width doubles each round while the search direction alternates
 * (s1->s2, then s2->s1).  The best path found is stored in the module
 * globals 'path' / 'path_fwd' for later retrieval by vrna_path_findpath_ub().
 *
 * @param vc    fold compound used for energy evaluation
 * @param s1    start structure in dot-bracket notation
 * @param s2    target structure in dot-bracket notation
 * @param width maximum breadth of the search
 * @param maxE  upper bound for acceptable saddle energies
 * @return      the best saddle energy found, or maxE if none was better
 */
PUBLIC int
vrna_path_findpath_saddle_ub(vrna_fold_compound_t *vc,
                             const char           *s1,
                             const char           *s2,
                             int                  width,
                             int                  maxE)
{
  int         maxl;
  const char  *tmp;
  move_t      *bestpath = NULL;
  int         dir;

  path_fwd  = dir = 0;
  maxl      = 1;
  do {
    int saddleE;
    path_fwd = !path_fwd;
    if (maxl > width)
      maxl = width;

    /* discard the path from the previous round; free(NULL) is a no-op, so
     * no guard is needed.  Clear the global to avoid a dangling pointer
     * until find_path_once() assigns a fresh one. */
    free(path);
    path = NULL;

    saddleE = find_path_once(vc, s1, s2, maxl, maxE);
    if (saddleE < maxE) {
      /* better path found: keep it and remember its direction */
      maxE = saddleE;
      free(bestpath);
      bestpath  = path;
      path      = NULL;
      dir       = path_fwd;
    } else {
      free(path);
      path = NULL;
    }

    /* swap start/target for the reverse-direction round */
    tmp   = s1;
    s1    = s2;
    s2    = tmp;
    maxl  *= 2;
  } while (maxl < 2 * width);

  /* (re)set some globals */
  path      = bestpath;
  path_fwd  = dir;

  return maxE;
}
/* Backward-compatibility wrapper around vrna_path_findpath(): builds (or
 * re-uses) the cached fold compound for 'seq' and returns the best route
 * between s1 and s2 with search breadth 'maxkeep'. */
PUBLIC vrna_path_t *
get_path(const char *seq,
         const char *s1,
         const char *s2,
         int        maxkeep)
{
  vrna_path_t           *route    = NULL;
  char                  *sequence = NULL;
  vrna_fold_compound_t  *vc       = NULL;
  vrna_md_t             md, *md_p;

  set_model_details(&md);

  if (backward_compat_compound) {
    if (!strcmp(seq, backward_compat_compound->sequence)) {
      /* check if sequence is the same as before */
      md.window_size  = backward_compat_compound->length;
      md.max_bp_span  = backward_compat_compound->length;
      md_p            = &(backward_compat_compound->params->model_details);
      if (!memcmp(&md, md_p, sizeof(vrna_md_t))) /* check if model_details are the same as before */
        vc = backward_compat_compound;           /* re-use previous vrna_fold_compound_t */
    }
  }

  if (!vc) {
    /* cache miss: rebuild the fold compound and cache it for the next call */
    vrna_fold_compound_free(backward_compat_compound);
    sequence                  = vrna_cut_point_insert(seq, cut_point);
    backward_compat_compound  = vc = vrna_fold_compound(sequence, &md, VRNA_OPTION_EVAL_ONLY);
    free(sequence);
  }

  route = vrna_path_findpath(vc, s1, s2, maxkeep);
  return route;
}
PUBLIC vrna_path_t *
vrna_path_findpath(vrna_fold_compound_t *vc,
                   const char           *s1,
                   const char           *s2,
                   int                  width)
{
  /* Delegate to the bounded variant with an effectively infinite
   * saddle-energy upper bound. */
  vrna_path_t *route;

  route = vrna_path_findpath_ub(vc, s1, s2, width, INT_MAX - 1);
  return route;
}
PUBLIC vrna_path_t *
vrna_path_findpath_ub(vrna_fold_compound_t *vc,
                      const char           *s1,
                      const char           *s2,
                      int                  width,
                      int                  maxE)
{
  /* Compute a re-folding path from S1 to S2 with saddle energy below MAXE,
   * searching with breadth WIDTH.  Returns an array of BP_dist + 1 path
   * entries (plus a zeroed sentinel entry), or NULL when no path with
   * saddle < MAXE exists.  Consumes the global move list `path' produced
   * by vrna_path_findpath_saddle_ub(). */
  int         E, d;
  vrna_path_t *route = NULL;

  E = vrna_path_findpath_saddle_ub(vc, s1, s2, width, maxE);
  /* did we find a better path than one with saddle maxE? */
  if (E < maxE) {
    /* BP_dist + 2 slots: the BP_dist + 1 structures plus a sentinel */
    route = (vrna_path_t *)vrna_alloc((BP_dist + 2) * sizeof(vrna_path_t));
    qsort(path, BP_dist, sizeof(move_t), compare_moves_when);
    if (path_fwd) {
      /* forward search direction: replay the moves starting from s1 */
      /* memorize start of path */
      route[0].s  = strdup(s1);
      route[0].en = vrna_eval_structure(vc, s1);
      for (d = 0; d < BP_dist; d++) {
        int i, j;
        route[d + 1].s = strdup(route[d].s);
        i = path[d].i;
        j = path[d].j;
        if (i < 0) {
          /* delete */
          route[d + 1].s[(-i) - 1] = route[d + 1].s[(-j) - 1] = '.';
        } else {
          /* insert */
          route[d + 1].s[i - 1] = '(';
          route[d + 1].s[j - 1] = ')';
        }

        route[d + 1].en = path[d].E / 100.0;
      }
    } else {
      /* backward search direction: replay moves from s2, filling the
       * route array back-to-front so it still reads s1 -> s2 */
      /* memorize start of path */
      route[BP_dist].s  = strdup(s2);
      route[BP_dist].en = vrna_eval_structure(vc, s2);
      for (d = 0; d < BP_dist; d++) {
        int i, j;
        route[BP_dist - d - 1].s = strdup(route[BP_dist - d].s);
        i = path[d].i;
        j = path[d].j;
        if (i < 0) {
          /* delete */
          route[BP_dist - d - 1].s[(-i) - 1] = route[BP_dist - d - 1].s[(-j) - 1] = '.';
        } else {
          route[BP_dist - d - 1].s[i - 1] = '(';
          route[BP_dist - d - 1].s[j - 1] = ')';
        }

        route[BP_dist - d - 1].en = path[d].E / 100.0;
      }
    }

#if _DEBUG_FINDPATH_
    /* NOTE(review): this debug block references `seq' and `num_entry',
     * neither of which is in scope here -- it will not compile when the
     * macro is enabled; verify before turning it on. */
    fprintf(stderr, "\n%s\n%s\n%s\n\n", seq, s1, s2);
    for (d = 0; d <= BP_dist; d++)
      fprintf(stderr, "%s %6.2f\n", route[d].s, route[d].en);
    fprintf(stderr, "%d\n", *num_entry);
#endif
  }

  free(path);
  path = NULL;
  return route;
}
PRIVATE int
try_moves(vrna_fold_compound_t *vc,
          intermediate_t       c,
          int                  maxE,
          intermediate_t       *next,
          int                  dist)
{
  /* Expand intermediate C by applying every not-yet-applied move whose
   * resulting structure stays strictly below MAXE.  Each accepted result
   * is appended to NEXT: new pair table, energies, and a copy of the move
   * list with this move stamped as applied at step DIST.  Returns the
   * number of entries written to NEXT. */
  int     *loopidx, len, num_next = 0, en, oldE;
  move_t  *mv;
  short   *pt;

  len = c.pt[0];  /* pair table slot 0 holds the sequence length */
  loopidx = vrna_loopidx_from_ptable(c.pt);
  oldE = c.Sen;
  for (mv = c.moves; mv->i != 0; mv++) {
    int i, j;
    if (mv->when > 0)
      continue;  /* move already applied earlier on this path */

    i = mv->i;
    j = mv->j;
    pt = (short *)vrna_alloc(sizeof(short) * (len + 1));
    memcpy(pt, c.pt, (len + 1) * sizeof(short));
    if (j < 0) {
      /*it's a delete move */
      pt[-i] = 0;
      pt[-j] = 0;
    } else {
      /* insert move */
      if ((loopidx[i] == loopidx[j]) && /* i and j belong to same loop */
          (pt[i] == 0) && (pt[j] == 0)  /* ... and are unpaired */
          ) {
        pt[i] = j;
        pt[j] = i;
      } else {
        free(pt);
        continue; /* illegal move, try next; */
      }
    }

#ifdef LOOP_EN
    /* incremental evaluation of the move relative to C's current energy */
    en = c.curr_en + vrna_eval_move_pt(vc, c.pt, i, j);
#else
    en = vrna_eval_structure_pt(vc, pt);
#endif
    if (en < maxE) {
      /* saddle energy of the extended path = maximum along the path */
      next[num_next].Sen = (en > oldE) ? en : oldE;
      next[num_next].curr_en = en;
      next[num_next].pt = pt;
      /* temporarily stamp the move so copy_moves() records it, then
       * restore: C's own move list is shared across iterations */
      mv->when = dist;
      mv->E = en;
      next[num_next++].moves = copy_moves(c.moves);
      mv->when = 0;
    } else {
      free(pt);
    }
  }
  free(loopidx);
  return num_next;
}
PRIVATE int
find_path_once(vrna_fold_compound_t *vc,
               const char           *s1,
               const char           *s2,
               int                  maxl,
               int                  maxE)
{
  /* One breadth-first search pass from S1 towards S2, keeping at most
   * MAXL candidates per base-pair-distance class and pruning every
   * intermediate whose saddle energy reaches MAXE.  On success the
   * global `path' receives the winning move list and the saddle energy
   * is returned; INT_MAX is returned when the search dies out.  Also
   * sets the global BP_dist to the base-pair distance of S1 and S2. */
  short           *pt1, *pt2;
  move_t          *mlist;
  int             i, len, d, dist = 0, result;
  intermediate_t  *current, *next;

  pt1 = vrna_ptable(s1);
  pt2 = vrna_ptable(s2);
  len = (int)strlen(s1);
  /* build the list of moves that transform s1 into s2 */
  mlist = (move_t *)vrna_alloc(sizeof(move_t) * len); /* bp_dist < n */
  for (i = 1; i <= len; i++) {
    if (pt1[i] != pt2[i]) {
      if (i < pt1[i]) {
        /* need to delete this pair */
        mlist[dist].i = -i;
        mlist[dist].j = -pt1[i];
        mlist[dist++].when = 0;
      }

      if (i < pt2[i]) {
        /* need to insert this pair */
        mlist[dist].i = i;
        mlist[dist].j = pt2[i];
        mlist[dist++].when = 0;
      }
    }
  }
  free(pt2);
  BP_dist = dist;
  /* current[] takes ownership of pt1 and mlist from here on */
  current = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (maxl + 1));
  current[0].pt = pt1;
  current[0].Sen = current[0].curr_en = vrna_eval_structure_pt(vc, pt1);
  current[0].moves = mlist;
  next = (intermediate_t *)vrna_alloc(sizeof(intermediate_t) * (dist * maxl + 1));
  for (d = 1; d <= dist; d++) {
    /* go through the distance classes */
    int             c, u, num_next = 0;
    intermediate_t  *cc;
    for (c = 0; current[c].pt != NULL; c++)
      num_next += try_moves(vc, current[c], maxE, next + num_next, d);
    if (num_next == 0) {
      /* no intermediate survived the energy bound: search failed */
      for (cc = current; cc->pt != NULL; cc++)
        free_intermediate(cc);
      current[0].Sen = INT_MAX;
      break;
    }

    /* remove duplicates via sort|uniq
     * if this becomes a bottleneck we can use a hash instead */
    qsort(next, num_next, sizeof(intermediate_t), compare_ptable);
    for (u = 0, c = 1; c < num_next; c++) {
      if (memcmp(next[u].pt, next[c].pt, sizeof(short) * len) != 0)
        next[++u] = next[c];
      else
        free_intermediate(next + c);
    }
    num_next = u + 1;
    /* keep only the MAXL energetically best candidates */
    qsort(next, num_next, sizeof(intermediate_t), compare_energy);
    /* free the old stuff */
    for (cc = current; cc->pt != NULL; cc++)
      free_intermediate(cc);
    for (u = 0; u < maxl && u < num_next; u++)
      current[u] = next[u];
    for (; u < num_next; u++)
      free_intermediate(next + u);
    num_next = 0;
  }
  free(next);
  /* hand the winning move list to the caller via the global */
  path = current[0].moves;
  result = current[0].Sen;
  free(current[0].pt);
  free(current);
  return result;
}
PRIVATE void
free_intermediate(intermediate_t *i)
{
  /* Release the payload of one intermediate and mark its slot unused
   * (NULL pair table / move list, worst-case saddle energy). */
  free(i->pt);
  free(i->moves);

  i->pt     = NULL;
  i->moves  = NULL;
  i->Sen    = INT_MAX;
}
PRIVATE int
compare_ptable(const void *A,
               const void *B)
{
  /* qsort comparator: order primarily by pair-table content, breaking
   * ties by saddle energy, then by current energy. */
  const intermediate_t  *a = (const intermediate_t *)A;
  const intermediate_t  *b = (const intermediate_t *)B;
  int                   c;

  c = memcmp(a->pt, b->pt, a->pt[0] * sizeof(short));
  if (c != 0)
    return c;

  if (a->Sen != b->Sen)
    return a->Sen - b->Sen;

  return a->curr_en - b->curr_en;
}
PRIVATE int
compare_energy(const void *A,
               const void *B)
{
  /* qsort comparator: saddle energy first, current energy as tiebreak. */
  const intermediate_t  *a = (const intermediate_t *)A;
  const intermediate_t  *b = (const intermediate_t *)B;

  if (a->Sen != b->Sen)
    return a->Sen - b->Sen;

  return a->curr_en - b->curr_en;
}
PRIVATE int
compare_moves_when(const void *A,
                   const void *B)
{
  /* qsort comparator: order moves by the step at which they were applied. */
  const move_t  *a = (const move_t *)A;
  const move_t  *b = (const move_t *)B;

  return a->when - b->when;
}
PRIVATE move_t *
copy_moves(move_t *mvs)
{
  /* Duplicate a move list: BP_dist entries plus the terminating entry.
   * Caller owns the returned memory. */
  size_t  bytes = sizeof(move_t) * (BP_dist + 1);
  move_t  *cpy  = (move_t *)vrna_alloc(bytes);

  memcpy(cpy, mvs, bytes);
  return cpy;
}
#ifdef TEST_FINDPATH
PUBLIC void
print_path(const char *seq,
           const char *struc)
{
  /* Print SEQ and STRUC (re-inserting the cut point for two-strand
   * input), then replay the moves of the global `path' -- sorted by
   * application order -- on a working copy of STRUC.  Only the start
   * structure is printed; per-step output is commented out below. */
  int   d;
  char  *s;

  s = strdup(struc);
  if (cut_point == -1) {
    printf("%s\n%s\n", seq, s);
  }

  /* printf("%s\n%s %6.2f\n", seq, s, vrna_eval_structure_simple(seq,s)); */
  else {
    char *pstruct, *pseq;
    pstruct = vrna_cut_point_insert(s, cut_point);
    pseq = vrna_cut_point_insert(seq, cut_point);
    printf("%s\n%s\n", pseq, pstruct);
    /* printf("%s\n%s %6.2f\n", pseq, pstruct, vrna_eval_structure_simple(seq,s)); */
    free(pstruct);
    free(pseq);
  }

  qsort(path, BP_dist, sizeof(move_t), compare_moves_when);
  for (d = 0; d < BP_dist; d++) {
    int i, j;
    i = path[d].i;
    j = path[d].j;
    if (i < 0) {
      /* delete */
      s[(-i) - 1] = s[(-j) - 1] = '.';
    } else {
      /* insert */
      s[i - 1] = '(';
      s[j - 1] = ')';
    }

    /* printf("%s %6.2f - %6.2f\n", s, vrna_eval_structure_simple(seq,s), path[d].E/100.0); */
  }
  free(s);
}
int
main(int argc,
     char *argv[])
{
  /* Test driver: read a sequence and two structures from stdin, compute
   * the saddle energy of a re-folding path between them, and with -v
   * also print the path itself.  Options: -m <depth>, -d <dangles>, -v. */
  char        *line, *seq, *s1, *s2;
  int         E, maxkeep = 1000;
  int         verbose = 0, i;
  vrna_path_t *route, *r;

  for (i = 1; i < argc; i++) {
    /* reject empty / non-option arguments before reading argv[i][1]:
     * indexing past the terminator of "" would be out of bounds */
    if (argv[i][0] != '-')
      usage();

    switch (argv[i][1]) {
      case 'm':
        if (strcmp(argv[i], "-m") == 0) {
          /* guard against a missing depth argument: argv[argc] is NULL
           * and must not be passed to sscanf() */
          if (i + 1 >= argc)
            usage();

          sscanf(argv[++i], "%d", &maxkeep);
        }

        break;
      case 'v':
        verbose = !strcmp(argv[i], "-v");
        break;
      case 'd':
        if (strcmp(argv[i], "-d") == 0) {
          if (i + 1 >= argc)
            usage();

          sscanf(argv[++i], "%d", &dangles);
        }

        break;
      default:
        usage();
    }
  }
  cut_point = -1;
  line  = vrna_read_line(stdin);
  seq   = vrna_cut_point_remove(line, &cut_point);
  free(line);
  line  = vrna_read_line(stdin);
  s1    = vrna_cut_point_remove(line, &cut_point);
  free(line);
  line  = vrna_read_line(stdin);
  s2    = vrna_cut_point_remove(line, &cut_point);
  free(line);
  E = find_saddle(seq, s1, s2, maxkeep);
  printf("saddle_energy = %6.2f\n", E / 100.);
  if (verbose) {
    /* print the path in the direction the best search ran */
    if (path_fwd)
      print_path(seq, s1);
    else
      print_path(seq, s2);

    free(path);
    path  = NULL;
    route = get_path(seq, s1, s2, maxkeep);
    for (r = route; r->s; r++) {
      if (cut_point == -1) {
        printf("%s %6.2f\n", r->s, r->en);
        /* printf("%s %6.2f - %6.2f\n", r->s, vrna_eval_structure_simple(seq,r->s), r->en); */
      } else {
        char *pstruct;
        pstruct = vrna_cut_point_insert(r->s, cut_point);
        printf("%s %6.2f\n", pstruct, r->en);
        /* printf("%s %6.2f - %6.2f\n", pstruct, vrna_eval_structure_simple(seq,r->s), r->en); */
        free(pstruct);
      }

      free(r->s);
    }
    free(route);
  }

  free(seq);
  free(s1);
  free(s2);
  return EXIT_SUCCESS;
}
static void
usage(void)
{
  /* Abort with a short synopsis of the accepted command line. */
  vrna_message_error("usage: findpath.c [-m depth] [-d[0|1|2]] [-v]");
}
#endif
|
c-omp.c | /* This file contains routines to construct OpenACC and OpenMP constructs,
called from parsing in the C and C++ front ends.
Copyright (C) 2005-2020 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>,
Diego Novillo <dnovillo@redhat.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "options.h"
#include "c-common.h"
#include "gimple-expr.h"
#include "c-pragma.h"
#include "stringpool.h"
#include "omp-general.h"
#include "gomp-constants.h"
#include "memmodel.h"
#include "attribs.h"
#include "gimplify.h"
/* Complete a #pragma oacc wait construct.  LOC is the location of
   the #pragma.  PARMS is the clause list holding the wait arguments,
   CLAUSES may carry an async clause.  Returns a call to the
   GOACC_wait builtin: (async-value, arg-count, arg...).  */

tree
c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
{
  const int nparms = list_length (parms);
  tree stmt, t;
  vec<tree, va_gc> *args;

  vec_alloc (args, nparms + 2);
  stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);

  /* First argument: the async queue, or GOMP_ASYNC_SYNC when no async
     clause was given.  NOTE(review): reads the async expression off the
     head of CLAUSES -- assumes the async clause, when present, is first;
     confirm against the parser.  */
  if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
    t = OMP_CLAUSE_ASYNC_EXPR (clauses);
  else
    t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);

  args->quick_push (t);
  args->quick_push (build_int_cst (integer_type_node, nparms));

  for (t = parms; t; t = TREE_CHAIN (t))
    {
      /* Re-materialize constant wait arguments as plain int constants.  */
      if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
        args->quick_push (build_int_cst (integer_type_node,
                          TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
      else
        args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
    }

  stmt = build_call_expr_loc_vec (loc, stmt, args);

  vec_free (args);

  return stmt;
}
/* Complete a #pragma omp master construct.  STMT is the structured-block
   that follows the pragma.  LOC is the location of the #pragma.  */

tree
c_finish_omp_master (location_t loc, tree stmt)
{
  tree node = build1 (OMP_MASTER, void_type_node, stmt);

  node = add_stmt (node);
  SET_EXPR_LOCATION (node, loc);
  return node;
}
/* Complete a #pragma omp taskgroup construct.  BODY is the
   structured-block that follows the pragma, CLAUSES its clause list.
   LOC is the location of the #pragma.  */

tree
c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
{
  tree node = make_node (OMP_TASKGROUP);

  TREE_TYPE (node) = void_type_node;
  OMP_TASKGROUP_BODY (node) = body;
  OMP_TASKGROUP_CLAUSES (node) = clauses;
  SET_EXPR_LOCATION (node, loc);

  return add_stmt (node);
}
/* Complete a #pragma omp critical construct.  BODY is the
   structured-block that follows the pragma, NAME is the identifier in
   the pragma, or null if it was omitted.  LOC is the location of the
   #pragma.  */

tree
c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
{
  tree node = make_node (OMP_CRITICAL);

  TREE_TYPE (node) = void_type_node;
  OMP_CRITICAL_BODY (node) = body;
  OMP_CRITICAL_NAME (node) = name;
  OMP_CRITICAL_CLAUSES (node) = clauses;
  SET_EXPR_LOCATION (node, loc);

  return add_stmt (node);
}
/* Complete a #pragma omp ordered construct.  STMT is the structured-block
   that follows the pragma, CLAUSES its clause list.  LOC is the location
   of the #pragma.  */

tree
c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
{
  tree t = make_node (OMP_ORDERED);

  TREE_TYPE (t) = void_type_node;
  OMP_ORDERED_BODY (t) = stmt;
  /* Without -fopenmp (i.e. -fopenmp-simd only), unless the clause list
     is exactly one SIMD clause, replace it with a freshly built bare
     SIMD clause.  */
  if (!flag_openmp /* flag_openmp_simd */
      && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
          || OMP_CLAUSE_CHAIN (clauses)))
    clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
  OMP_ORDERED_CLAUSES (t) = clauses;
  SET_EXPR_LOCATION (t, loc);
  return add_stmt (t);
}
/* Complete a #pragma omp barrier construct.  LOC is the location of
   the #pragma.  */

void
c_finish_omp_barrier (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);

  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskwait construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskwait (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);

  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp taskyield construct.  LOC is the location of the
   pragma.  */

void
c_finish_omp_taskyield (location_t loc)
{
  tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);

  add_stmt (build_call_expr_loc (loc, fn, 0));
}
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS.
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
                     enum tree_code opcode, tree lhs, tree rhs,
                     tree v, tree lhs1, tree rhs1, bool swapped,
                     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early if any operand already carries an error.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      /* LHS is a bit-field: operate on its representative field instead,
         remembering bit position/size; BLHS keeps the original ref.  */
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
          && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
        bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
                  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
        bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
                 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
                    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
          || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
         it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      /* For a bit-field read, extract the relevant bits afterwards.  */
      if (blhs)
        x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
                        bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                                loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
                        bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
        rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
        rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
                         loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* Peel off a pre-evaluation side effect; re-attached at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* For a bit-field update, insert the computed bits back into the
     representative word.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
                      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
        error_at (loc, "%<#pragma omp atomic update%> uses two different "
                       "variables for memory");
      else
        error_at (loc, "%<#pragma omp atomic capture%> uses two different "
                       "variables for memory");
      return error_mark_node;
    }

  /* Map bit-field accesses in LHS1/RHS1 to their representative fields,
     mirroring the transformation applied to LHS above.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
                     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
                     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Capture forms: assign the (old or new) value to V.  */
      /* Generally it is hard to prove lhs1 and lhs are the same memory
         location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
        {
          if (lhs1 != orig_lhs && !test)
            {
              error_at (loc, "%<#pragma omp atomic capture%> uses two "
                             "different variables for memory");
              return error_mark_node;
            }
        }
      if (blhs)
        {
          x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
                          bitsize_int (bitsize), bitsize_int (bitpos));
          type = TREE_TYPE (blhs);
        }
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
                             loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
        {
          tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
          if (rhs1addr == error_mark_node)
            return error_mark_node;
          x = omit_one_operand_loc (loc, type, x, rhs1addr);
        }
      if (lhs1 && lhs1 != orig_lhs)
        {
          tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
          if (lhs1addr == error_mark_node)
            return error_mark_node;
          if (code == OMP_ATOMIC_CAPTURE_OLD)
            x = omit_one_operand_loc (loc, type, x, lhs1addr);
          else
            {
              if (!test)
                x = save_expr (x);
              x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
            }
        }
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
        return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach the pre-evaluation side effect peeled off earlier.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
/* Return true if TYPE is the implementation's omp_depend_t.  */

bool
c_omp_depend_t_p (tree type)
{
  type = TYPE_MAIN_VARIANT (type);
  /* Match a complete struct named omp_depend_t, declared at file scope,
     whose size is exactly that of two pointers.  */
  return (TREE_CODE (type) == RECORD_TYPE
          && TYPE_NAME (type)
          && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
               ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
              == get_identifier ("omp_depend_t"))
          && (!TYPE_CONTEXT (type)
              || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
          && COMPLETE_TYPE_P (type)
          && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
          && !compare_tree_int (TYPE_SIZE (type),
                                2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
}
/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.  DEPOBJ is the depobj expression, CLAUSE an optional depend
   clause; KIND supplies the dependence kind when no clause is given
   (update/destroy forms).  */

void
c_finish_omp_depobj (location_t loc, tree depobj,
                     enum omp_clause_depend_kind kind, tree clause)
{
  tree t = NULL_TREE;

  /* Validate the depobj expression itself.  */
  if (!error_operand_p (depobj))
    {
      if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
        {
          error_at (EXPR_LOC_OR_LOC (depobj, loc),
                    "type of %<depobj%> expression is not %<omp_depend_t%>");
          depobj = error_mark_node;
        }
      else if (TYPE_READONLY (TREE_TYPE (depobj)))
        {
          error_at (EXPR_LOC_OR_LOC (depobj, loc),
                    "%<const%> qualified %<depobj%> expression");
          depobj = error_mark_node;
        }
    }
  else
    depobj = error_mark_node;

  if (clause == error_mark_node)
    return;

  if (clause)
    {
      gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
                  && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_CHAIN (clause))
        error_at (OMP_CLAUSE_LOCATION (clause),
                  "more than one locator in %<depend%> clause on %<depobj%> "
                  "construct");
      switch (OMP_CLAUSE_DEPEND_KIND (clause))
        {
        case OMP_CLAUSE_DEPEND_DEPOBJ:
          error_at (OMP_CLAUSE_LOCATION (clause),
                    "%<depobj%> dependence type specified in %<depend%> "
                    "clause on %<depobj%> construct");
          return;
        case OMP_CLAUSE_DEPEND_SOURCE:
        case OMP_CLAUSE_DEPEND_SINK:
          error_at (OMP_CLAUSE_LOCATION (clause),
                    "%<depend(%s)%> is only allowed in %<omp ordered%>",
                    OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
                    ? "source" : "sink");
          return;
        case OMP_CLAUSE_DEPEND_IN:
        case OMP_CLAUSE_DEPEND_OUT:
        case OMP_CLAUSE_DEPEND_INOUT:
        case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
          kind = OMP_CLAUSE_DEPEND_KIND (clause);
          t = OMP_CLAUSE_DECL (clause);
          gcc_assert (t);
          /* An iterator modifier appears as a TREE_LIST whose purpose is
             a TREE_VEC; that is not allowed here.  */
          if (TREE_CODE (t) == TREE_LIST
              && TREE_PURPOSE (t)
              && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
            {
              error_at (OMP_CLAUSE_LOCATION (clause),
                        "%<iterator%> modifier may not be specified on "
                        "%<depobj%> construct");
              return;
            }
          /* Take the address of the locator, preserving any side effects
             of a COMPOUND_EXPR wrapper.  */
          if (TREE_CODE (t) == COMPOUND_EXPR)
            {
              tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
              t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
                          t1);
            }
          else
            t = build_fold_addr_expr (t);
          break;
        default:
          gcc_unreachable ();
        }
    }
  else
    gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);

  if (depobj == error_mark_node)
    return;

  /* Access the depobj as two pointer-sized slots: slot 0 receives the
     locator address (when a clause is present), slot 1 the kind.  */
  depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
  tree dtype
    = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
                                   true);
  depobj = fold_convert (dtype, depobj);
  tree r;
  if (clause)
    {
      depobj = save_expr (depobj);
      r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
      add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
    }

  /* Translate the OpenMP dependence kind to the GOMP runtime constant.  */
  int k;
  switch (kind)
    {
    case OMP_CLAUSE_DEPEND_IN:
      k = GOMP_DEPEND_IN;
      break;
    case OMP_CLAUSE_DEPEND_OUT:
      k = GOMP_DEPEND_OUT;
      break;
    case OMP_CLAUSE_DEPEND_INOUT:
      k = GOMP_DEPEND_INOUT;
      break;
    case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
      k = GOMP_DEPEND_MUTEXINOUTSET;
      break;
    case OMP_CLAUSE_DEPEND_LAST:
      /* NOTE(review): -1 presumably marks the destroy form -- confirm
         against the GOMP runtime.  */
      k = -1;
      break;
    default:
      gcc_unreachable ();
    }
  t = build_int_cst (ptr_type_node, k);
  depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
                       TYPE_SIZE_UNIT (ptr_type_node));
  r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
}
/* Complete a #pragma omp flush construct.  We don't do anything with
   the variable list that the syntax allows.  MO is the memory order;
   LOC is the location of the #pragma.  */

void
c_finish_omp_flush (location_t loc, int mo)
{
  tree call;

  if (mo == MEMMODEL_LAST)
    {
      /* No memory order given: emit a full __sync_synchronize ().  */
      call = build_call_expr_loc
        (loc, builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE), 0);
    }
  else
    {
      /* Otherwise emit __atomic_thread_fence (MO).  */
      call = build_call_expr_loc
        (loc, builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE), 1,
         build_int_cst (integer_type_node, mo));
    }

  add_stmt (call);
}
/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  Returns EXP rewritten with the
   iteration variable DECL replaced by 0, or error_mark_node when EXP
   does not have one of the supported shapes.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions at least as wide as DECL are handled.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  /* Base case: the variable itself contributes 0 to the increment.  */
  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear in the minuend.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, MINUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear in either operand of the addition.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
        return fold_build2_loc (loc, PLUS_EXPR,
                                TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
        /* cp_build_modify_expr forces preevaluation of the RHS to make
           sure that it is evaluated before the lvalue-rvalue conversion
           is applied to the LHS.  Reconstruct the original expression.  */
        tree op0 = TREE_OPERAND (exp, 0);
        if (TREE_CODE (op0) == TARGET_EXPR
            && !VOID_TYPE_P (TREE_TYPE (op0)))
          {
            tree op1 = TREE_OPERAND (exp, 1);
            tree temp = TARGET_EXPR_SLOT (op0);
            if (BINARY_CLASS_P (op1)
                && TREE_OPERAND (op1, 1) == temp)
              {
                op1 = copy_node (op1);
                TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
                return check_omp_for_incr_expr (loc, op1, decl);
              }
          }
        break;
      }
    default:
      break;
    }

  return error_mark_node;
}
/* If the OMP_FOR increment expression in INCR is of pointer type,
   canonicalize it into an expression handled by gimplify_omp_for()
   and return it.  DECL is the iteration variable.  */

static tree
c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
{
  /* Non-pointer iteration variables (or increments without an offset
     operand) are returned unchanged.  */
  if (!POINTER_TYPE_P (TREE_TYPE (decl)) || !TREE_OPERAND (incr, 1))
    return incr;

  tree offset = fold_convert_loc (loc, sizetype, TREE_OPERAND (incr, 1));

  /* Decrements step backwards: negate the offset.  */
  if (TREE_CODE (incr) == POSTDECREMENT_EXPR
      || TREE_CODE (incr) == PREDECREMENT_EXPR)
    offset = fold_build1_loc (loc, NEGATE_EXPR, sizetype, offset);

  /* Rewrite as DECL = DECL p+ OFFSET.  */
  offset = fold_build_pointer_plus (decl, offset);
  return build2 (MODIFY_EXPR, void_type_node, decl, offset);
}
/* Validate and generate OMP_FOR.
DECLV is a vector of iteration variables, for each collapsed loop.
ORIG_DECLV, if non-NULL, is a vector with the original iteration
variables (prior to any transformations, by say, C++ iterators).
INITV, CONDV and INCRV are vectors containing initialization
expressions, controlling predicates and increment expressions.
BODY is the body of the loop and PRE_BODY statements that go before
the loop. */
tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
tree orig_declv, tree initv, tree condv, tree incrv,
tree body, tree pre_body, bool final_p)
{
location_t elocus;
bool fail = false;
int i;
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
{
tree decl = TREE_VEC_ELT (declv, i);
tree init = TREE_VEC_ELT (initv, i);
tree cond = TREE_VEC_ELT (condv, i);
tree incr = TREE_VEC_ELT (incrv, i);
elocus = locus;
if (EXPR_HAS_LOCATION (init))
elocus = EXPR_LOCATION (init);
/* Validate the iteration variable. */
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
&& TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
{
error_at (elocus, "invalid type for iteration variable %qE", decl);
fail = true;
}
else if (TYPE_ATOMIC (TREE_TYPE (decl)))
{
error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
fail = true;
/* _Atomic iterator confuses stuff too much, so we risk ICE
trying to diagnose it further. */
continue;
}
/* In the case of "for (int i = 0...)", init will be a decl. It should
have a DECL_INITIAL that we can turn into an assignment. */
if (init == decl)
{
elocus = DECL_SOURCE_LOCATION (decl);
init = DECL_INITIAL (decl);
if (init == NULL)
{
error_at (elocus, "%qE is not initialized", decl);
init = integer_zero_node;
fail = true;
}
DECL_INITIAL (decl) = NULL_TREE;
init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
/* FIXME diagnostics: This should
be the location of the INIT. */
elocus,
init,
NULL_TREE);
}
if (init != error_mark_node)
{
gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
gcc_assert (TREE_OPERAND (init, 0) == decl);
}
if (cond == NULL_TREE)
{
error_at (elocus, "missing controlling predicate");
fail = true;
}
else
{
bool cond_ok = false;
/* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
evaluation of the vla VAR_DECL. We need to readd
them to the non-decl operand. See PR45784. */
while (TREE_CODE (cond) == COMPOUND_EXPR)
cond = TREE_OPERAND (cond, 1);
if (EXPR_HAS_LOCATION (cond))
elocus = EXPR_LOCATION (cond);
if (TREE_CODE (cond) == LT_EXPR
|| TREE_CODE (cond) == LE_EXPR
|| TREE_CODE (cond) == GT_EXPR
|| TREE_CODE (cond) == GE_EXPR
|| TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
tree op0 = TREE_OPERAND (cond, 0);
tree op1 = TREE_OPERAND (cond, 1);
/* 2.5.1. The comparison in the condition is computed in
the type of DECL, otherwise the behavior is undefined.
For example:
long n; int i;
i < n;
according to ISO will be evaluated as:
(long)i < n;
We want to force:
i < (int)n; */
if (TREE_CODE (op0) == NOP_EXPR
&& decl == TREE_OPERAND (op0, 0))
{
TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
TREE_OPERAND (cond, 1)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 1));
}
else if (TREE_CODE (op1) == NOP_EXPR
&& decl == TREE_OPERAND (op1, 0))
{
TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
TREE_OPERAND (cond, 0)
= fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 0));
}
if (decl == TREE_OPERAND (cond, 0))
cond_ok = true;
else if (decl == TREE_OPERAND (cond, 1))
{
TREE_SET_CODE (cond,
swap_tree_comparison (TREE_CODE (cond)));
TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
TREE_OPERAND (cond, 0) = decl;
cond_ok = true;
}
if (TREE_CODE (cond) == NE_EXPR
|| TREE_CODE (cond) == EQ_EXPR)
{
if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
{
if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
cond_ok = false;
}
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MIN_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? GT_EXPR : LE_EXPR);
else if (operand_equal_p (TREE_OPERAND (cond, 1),
TYPE_MAX_VALUE (TREE_TYPE (decl)),
0))
TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
? LT_EXPR : GE_EXPR);
else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
cond_ok = false;
}
if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
{
tree ce = NULL_TREE, *pce = &ce;
tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
for (tree c = TREE_VEC_ELT (condv, i); c != cond;
c = TREE_OPERAND (c, 1))
{
*pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
TREE_OPERAND (cond, 1));
pce = &TREE_OPERAND (*pce, 1);
}
TREE_OPERAND (cond, 1) = ce;
TREE_VEC_ELT (condv, i) = cond;
}
}
if (!cond_ok)
{
error_at (elocus, "invalid controlling predicate");
fail = true;
}
}
if (incr == NULL_TREE)
{
error_at (elocus, "missing increment expression");
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
incr_ok = true;
if (!fail
&& TREE_CODE (cond) == NE_EXPR
&& TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
&& TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
&& (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
!= INTEGER_CST))
{
/* For pointer to VLA, transform != into < or >
depending on whether incr is increment or decrement. */
if (TREE_CODE (incr) == PREINCREMENT_EXPR
|| TREE_CODE (incr) == POSTINCREMENT_EXPR)
TREE_SET_CODE (cond, LT_EXPR);
else
TREE_SET_CODE (cond, GT_EXPR);
}
incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
break;
case COMPOUND_EXPR:
if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
|| TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
break;
incr = TREE_OPERAND (incr, 1);
/* FALLTHRU */
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
|| (TREE_CODE (TREE_OPERAND (incr, 1))
== POINTER_PLUS_EXPR))
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
tree t = check_omp_for_incr_expr (elocus,
TREE_OPERAND (incr, 1),
decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
if (!fail
&& incr_ok
&& TREE_CODE (cond) == NE_EXPR)
{
tree i = TREE_OPERAND (incr, 1);
i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
i = c_fully_fold (i, false, NULL);
if (!final_p
&& TREE_CODE (i) != INTEGER_CST)
;
else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
{
tree unit
= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
if (unit)
{
enum tree_code ccode = GT_EXPR;
unit = c_fully_fold (unit, false, NULL);
i = fold_convert (TREE_TYPE (unit), i);
if (operand_equal_p (unit, i, 0))
ccode = LT_EXPR;
if (ccode == GT_EXPR)
{
i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
if (i == NULL_TREE
|| !operand_equal_p (unit, i, 0))
{
error_at (elocus,
"increment is not constant 1 or "
"-1 for %<!=%> condition");
fail = true;
}
}
if (TREE_CODE (unit) != INTEGER_CST)
/* For pointer to VLA, transform != into < or >
depending on whether the pointer is
incremented or decremented in each
iteration. */
TREE_SET_CODE (cond, ccode);
}
}
else
{
if (!integer_onep (i) && !integer_minus_onep (i))
{
error_at (elocus,
"increment is not constant 1 or -1 for"
" %<!=%> condition");
fail = true;
}
}
}
break;
default:
break;
}
if (!incr_ok)
{
error_at (elocus, "invalid increment expression");
fail = true;
}
}
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
return NULL;
else
{
tree t = make_node (code);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = initv;
OMP_FOR_COND (t) = condv;
OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
OMP_FOR_ORIG_DECLS (t) = orig_declv;
SET_EXPR_LOCATION (t, locus);
return t;
}
}
/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */
struct c_omp_check_loop_iv_data
{
  tree declv;		/* TREE_VEC of associated loop iteration variables
			   (entries may be TREE_LISTs for C++).  */
  bool fail;		/* Set to true once a diagnostic has been emitted.  */
  location_t stmt_loc;	/* Location of the OpenMP looping construct,
			   used when the expression has no location.  */
  location_t expr_loc;	/* Location of the expression currently walked.  */
  int kind;		/* Which expression is walked: 0 = initializer,
			   1 = condition, 2 = increment.  */
  walk_tree_lh lh;	/* Language hook forwarded to walk_tree_1.  */
  hash_set<tree> *ppset; /* Pointer set so each subtree is walked once.  */
};
/* Helper function called via walk_tree, to diagnose uses
   of associated loop IVs inside of lb, b and incr expressions
   of OpenMP loops.  */

static tree
c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
{
  struct c_omp_check_loop_iv_data *d
    = (struct c_omp_check_loop_iv_data *) data;
  if (DECL_P (*tp))
    {
      /* Does *TP name one of the associated iteration variables?  An
	 entry in DECLV is either the decl itself, or a TREE_LIST whose
	 purpose is the decl and whose chain may be a TREE_VEC holding
	 the decl in slot 2.  */
      for (int j = 0; j < TREE_VEC_LENGTH (d->declv); j++)
	{
	  tree elt = TREE_VEC_ELT (d->declv, j);
	  bool found = (*tp == elt);
	  if (!found && TREE_CODE (elt) == TREE_LIST)
	    {
	      if (*tp == TREE_PURPOSE (elt))
		found = true;
	      else if (TREE_CHAIN (elt)
		       && TREE_CODE (TREE_CHAIN (elt)) == TREE_VEC
		       && *tp == TREE_VEC_ELT (TREE_CHAIN (elt), 2))
		found = true;
	    }
	  if (!found)
	    continue;
	  /* Prefer the expression location, fall back to the stmt.  */
	  location_t loc = d->expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = d->stmt_loc;
	  switch (d->kind)
	    {
	    case 0:
	      error_at (loc, "initializer expression refers to "
			"iteration variable %qD", *tp);
	      break;
	    case 1:
	      error_at (loc, "condition expression refers to "
			"iteration variable %qD", *tp);
	      break;
	    case 2:
	      error_at (loc, "increment expression refers to "
			"iteration variable %qD", *tp);
	      break;
	    }
	  d->fail = true;
	}
    }
  /* Don't walk dtors added by C++ wrap_cleanups_r.  */
  else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
	   && TRY_CATCH_IS_CLEANUP (*tp))
    {
      *walk_subtrees = 0;
      return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
			  d->ppset, d->lh);
    }

  return NULL_TREE;
}
/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.  */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  int len = TREE_VEC_LENGTH (OMP_FOR_INIT (stmt));
  for (int i = 0; i < len; i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);

      /* Walk the lower bound expression.  */
      data.kind = 0;
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, &pset, lh);

      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      tree orig = TREE_VEC_ELT (declv, i);
      if (decl == orig
	  || (TREE_CODE (orig) == TREE_LIST && decl == TREE_PURPOSE (orig)))
	{
	  data.kind = 1;
	  data.expr_loc = EXPR_LOCATION (cond);
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}

      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* For DECL = DECL + X walk X; for DECL = X + DECL walk X
	     (operand 0); otherwise the step is operand 1.  */
	  int op = (TREE_CODE (incr) == PLUS_EXPR
		    && TREE_OPERAND (incr, 1) == decl) ? 0 : 1;
	  data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, op));
	  walk_tree_1 (&TREE_OPERAND (incr, op),
		       c_omp_check_loop_iv_r, &data, &pset, lh);
	}
    }
  return !data.fail;
}
/* Similar, but allows to check the init or cond expressions individually.
   DECLV is the vector of iteration variables, DECL the variable of this
   particular loop.  Either INIT or COND (or both) may be NULL_TREE.
   Returns false if an invalid IV reference was diagnosed.  */
bool
c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, tree decl,
			   tree init, tree cond, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;

  data.declv = declv;
  data.fail = false;
  data.stmt_loc = stmt_loc;
  data.lh = lh;
  data.ppset = &pset;
  if (init)
    {
      data.expr_loc = EXPR_LOCATION (init);
      data.kind = 0;
      walk_tree_1 (&init,
		   c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  if (cond)
    {
      gcc_assert (COMPARISON_CLASS_P (cond));
      /* Use COND's own location here; the previous code used
	 EXPR_LOCATION (init), which reported the wrong location and
	 dereferenced NULL_TREE when only COND was being checked.  */
      data.expr_loc = EXPR_LOCATION (cond);
      data.kind = 1;
      /* Walk the operand that is not the iteration variable itself.  */
      if (TREE_OPERAND (cond, 0) == decl)
	walk_tree_1 (&TREE_OPERAND (cond, 1),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
      else
	walk_tree_1 (&TREE_OPERAND (cond, 0),
		     c_omp_check_loop_iv_r, &data, &pset, lh);
    }
  return !data.fail;
}
/* This function splits clauses for OpenACC combined loop
   constructs.  OpenACC combined loop constructs are:
   #pragma acc kernels loop
   #pragma acc parallel loop  */

tree
c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
			   bool is_parallel)
{
  tree loop_clauses = NULL_TREE;
  *not_loop_clauses = NULL_TREE;

  while (clauses)
    {
      /* Remember the rest of the chain before relinking this clause.  */
      tree rest = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* Loop clauses.  */
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_PRIVATE:
	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	/* Reductions must be duplicated on both constructs.  */
	case OMP_CLAUSE_REDUCTION:
	  if (is_parallel)
	    {
	      tree dup = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					   OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (dup) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (dup)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_CHAIN (dup) = *not_loop_clauses;
	      *not_loop_clauses = dup;
	    }
	  OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
	  loop_clauses = clauses;
	  break;

	/* Parallel/kernels clauses.  */
	default:
	  OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
	  *not_loop_clauses = clauses;
	  break;
	}

      clauses = rest;
    }

  return loop_clauses;
}
/* This function attempts to split or duplicate clauses for OpenMP
   combined/composite constructs.  Right now there are 30 different
   constructs.  CODE is the innermost construct in the combined construct,
   and MASK allows to determine which constructs are combined together,
   as every construct has at least one clause that no other construct
   has (except for OMP_SECTIONS, but that can be only combined with parallel,
   and OMP_MASTER, which doesn't have any clauses at all).
   OpenMP combined/composite constructs are:
   #pragma omp distribute parallel for
   #pragma omp distribute parallel for simd
   #pragma omp distribute simd
   #pragma omp for simd
   #pragma omp master taskloop
   #pragma omp master taskloop simd
   #pragma omp parallel for
   #pragma omp parallel for simd
   #pragma omp parallel loop
   #pragma omp parallel master
   #pragma omp parallel master taskloop
   #pragma omp parallel master taskloop simd
   #pragma omp parallel sections
   #pragma omp target parallel
   #pragma omp target parallel for
   #pragma omp target parallel for simd
   #pragma omp target parallel loop
   #pragma omp target teams
   #pragma omp target teams distribute
   #pragma omp target teams distribute parallel for
   #pragma omp target teams distribute parallel for simd
   #pragma omp target teams distribute simd
   #pragma omp target teams loop
   #pragma omp target simd
   #pragma omp taskloop simd
   #pragma omp teams distribute
   #pragma omp teams distribute parallel for
   #pragma omp teams distribute parallel for simd
   #pragma omp teams distribute simd
   #pragma omp teams loop  */

void
c_omp_split_clauses (location_t loc, enum tree_code code,
		     omp_clause_mask mask, tree clauses, tree *cclauses)
{
  tree next, c;
  enum c_omp_clause_split s;
  int i;

  /* Start with an empty clause list for every leaf construct; each input
     clause is moved (and possibly duplicated) onto the CCLAUSES[] entry
     selected by S at the bottom of the loop.  Presence of a construct in
     the combination is detected by testing MASK for a clause bit only
     that construct accepts (e.g. NUM_THREADS => parallel is present).  */
  for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
    cclauses[i] = NULL;
  /* Add implicit nowait clause on
     #pragma omp parallel {for,for simd,sections}.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
    switch (code)
      {
      case OMP_FOR:
      case OMP_SIMD:
	if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	  cclauses[C_OMP_CLAUSE_SPLIT_FOR]
	    = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      case OMP_SECTIONS:
	cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
	  = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
	break;
      default:
	break;
      }

  for (; clauses ; clauses = next)
    {
      next = OMP_CLAUSE_CHAIN (clauses);

      switch (OMP_CLAUSE_CODE (clauses))
	{
	/* First the clauses that are unique to some constructs.  */
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_MAP:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_DEPEND:
	  s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	  s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_DIST_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_PROC_BIND:
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	case OMP_CLAUSE_ORDERED:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_SCHEDULE:
	  s = C_OMP_CLAUSE_SPLIT_FOR;
	  /* The simd schedule modifier is only meaningful on for simd;
	     drop it when the innermost construct is not simd.  */
	  if (code != OMP_SIMD)
	    OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
	  break;
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_NONTEMPORAL:
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_PRIORITY:
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	case OMP_CLAUSE_BIND:
	  s = C_OMP_CLAUSE_SPLIT_LOOP;
	  break;
	/* Duplicate this to all of taskloop, distribute, for, simd and
	   loop.  */
	case OMP_CLAUSE_COLLAPSE:
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
		{
		  /* Another construct precedes simd: give simd its own
		     copy and fall through to pick the outer target of
		     the original clause below.  */
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  /* This must be #pragma omp target simd */
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_COLLAPSE);
		  OMP_CLAUSE_COLLAPSE_EXPR (c)
		    = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		   != 0)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	  break;
	/* Private clause is supported on all constructs but master,
	   it is enough to put it on the innermost one other than master.  For
	   #pragma omp {for,sections} put it on parallel though,
	   as that's what we did for OpenMP 3.1.  */
	case OMP_CLAUSE_PRIVATE:
	  switch (code)
	    {
	    case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
	    case OMP_FOR: case OMP_SECTIONS:
	    case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
	    case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
	    case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
	    case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
	    case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
	    default: gcc_unreachable ();
	    }
	  break;
	/* Firstprivate clause is supported on all constructs but
	   simd, master and loop.  Put it on the outermost of those and
	   duplicate on teams and parallel.  */
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    {
	      if (code == OMP_SIMD
		  && (mask & ((OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			      | (OMP_CLAUSE_MASK_1
				 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
		{
		  /* This must be #pragma omp target simd.  */
		  s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_FIRSTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
	      cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
	      != 0)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_FIRSTPRIVATE);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  else
		    s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		/* This must be
		   #pragma omp parallel master taskloop{, simd}.  */
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else
		/* This must be
		   #pragma omp parallel{, for{, simd}, sections,loop}
		   or
		   #pragma omp target parallel.  */
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		   != 0)
	    {
	      /* This must be one of
		 #pragma omp {,target }teams {distribute,loop}
		 #pragma omp target teams
		 #pragma omp {,target }teams distribute simd.  */
	      gcc_assert (code == OMP_DISTRIBUTE
			  || code == OMP_LOOP
			  || code == OMP_TEAMS
			  || code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_TEAMS;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      /* This must be #pragma omp distribute simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      /* This must be #pragma omp {,{,parallel }master }taskloop simd
		 or
		 #pragma omp {,parallel }master taskloop.  */
	      gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else
	    {
	      /* This must be #pragma omp for simd.  */
	      gcc_assert (code == OMP_SIMD);
	      s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  break;
	/* Lastprivate is allowed on distribute, for, sections, taskloop, loop
	   and simd.  In parallel {for{, simd},sections} we actually want to
	   put it on parallel rather than for or sections.  */
	case OMP_CLAUSE_LASTPRIVATE:
	  if (code == OMP_DISTRIBUTE)
	    {
	      s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1
		       << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
	    }
	  if (code == OMP_FOR || code == OMP_SECTIONS)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      break;
	    }
	  if (code == OMP_TASKLOOP)
	    {
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if (code == OMP_LOOP)
	    {
	      s = C_OMP_CLAUSE_SPLIT_LOOP;
	      break;
	    }
	  gcc_assert (code == OMP_SIMD);
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	      OMP_CLAUSE_CHAIN (c) = cclauses[s];
	      cclauses[s] = c;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_LASTPRIVATE);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
		= OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
	      cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Shared and default clauses are allowed on parallel, teams and
	   taskloop.  */
	case OMP_CLAUSE_SHARED:
	case OMP_CLAUSE_DEFAULT:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_CODE (clauses));
		  if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		    OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  else
		    OMP_CLAUSE_DEFAULT_KIND (c)
		      = OMP_CLAUSE_DEFAULT_KIND (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
		  cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      break;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
		  == 0)
		{
		  s = C_OMP_CLAUSE_SPLIT_TEAMS;
		  break;
		}
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_CODE (clauses));
	      if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
		OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      else
		OMP_CLAUSE_DEFAULT_KIND (c)
		  = OMP_CLAUSE_DEFAULT_KIND (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
	      cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  break;
	/* order clauses are allowed on for, simd and loop.  */
	case OMP_CLAUSE_ORDER:
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_ORDER);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
		  cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  break;
	/* Reduction is allowed on simd, for, parallel, sections, taskloop,
	   teams and loop.  Duplicate it on all of them, but omit on for or
	   sections if parallel is present (unless inscan, in that case
	   omit on parallel).  If taskloop or loop is combined with
	   parallel, omit it on parallel.  */
	case OMP_CLAUSE_REDUCTION:
	  if (OMP_CLAUSE_REDUCTION_TASK (clauses))
	    {
	      if (code == OMP_SIMD || code == OMP_LOOP)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "combined with %<simd%> or %<loop%>");
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	      else if (code != OMP_SECTIONS
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
		       && (mask & (OMP_CLAUSE_MASK_1
				   << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
		{
		  error_at (OMP_CLAUSE_LOCATION (clauses),
			    "invalid %<task%> reduction modifier on construct "
			    "not combined with %<parallel%>, %<for%> or "
			    "%<sections%>");
		  OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
		}
	    }
	  if (OMP_CLAUSE_REDUCTION_INSCAN (clauses)
	      && ((mask & ((OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
			   | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)))
		  != 0))
	    {
	      error_at (OMP_CLAUSE_LOCATION (clauses),
			"%<inscan%> %<reduction%> clause on construct other "
			"than %<for%>, %<simd%>, %<for simd%>, "
			"%<parallel for%>, %<parallel for simd%>");
	      OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0;
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
	    {
	      if (code == OMP_SIMD)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
		  cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0
		       && !OMP_CLAUSE_REDUCTION_INSCAN (clauses))
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else
		s = C_OMP_CLAUSE_SPLIT_FOR;
	    }
	  else if (code == OMP_SECTIONS
		   || code == OMP_PARALLEL
		   || code == OMP_MASTER)
	    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	  else if (code == OMP_TASKLOOP)
	    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  else if (code == OMP_LOOP)
	    s = C_OMP_CLAUSE_SPLIT_LOOP;
	  else if (code == OMP_SIMD)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_REDUCTION);
		  OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
		  OMP_CLAUSE_REDUCTION_CODE (c)
		    = OMP_CLAUSE_REDUCTION_CODE (clauses);
		  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		    = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
		  OMP_CLAUSE_REDUCTION_INSCAN (c)
		    = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		}
	      s = C_OMP_CLAUSE_SPLIT_SIMD;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TEAMS;
	  break;
	case OMP_CLAUSE_IN_REDUCTION:
	  /* in_reduction on taskloop simd becomes reduction on the simd
	     and keeps being in_reduction on taskloop.  */
	  if (code == OMP_SIMD)
	    {
	      c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
				    OMP_CLAUSE_REDUCTION);
	      OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
	      OMP_CLAUSE_REDUCTION_CODE (c)
		= OMP_CLAUSE_REDUCTION_CODE (clauses);
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
	      OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
		= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
	      OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
	      cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
	    }
	  s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	  break;
	case OMP_CLAUSE_IF:
	  if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
	    {
	      /* If the directive-name-modifier names a construct that is
		 part of the combination, put the clause there; otherwise
		 S stays at the sentinel SPLIT_COUNT and error recovery
		 below picks a single construct.  */
	      s = C_OMP_CLAUSE_SPLIT_COUNT;
	      switch (OMP_CLAUSE_IF_MODIFIER (clauses))
		{
		case OMP_PARALLEL:
		  if ((mask & (OMP_CLAUSE_MASK_1
			       << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		    s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		  break;
		case OMP_SIMD:
		  if (code == OMP_SIMD)
		    s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		case OMP_TASKLOOP:
		  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
		      != 0)
		    s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
		  break;
		case OMP_TARGET:
		  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
		      != 0)
		    s = C_OMP_CLAUSE_SPLIT_TARGET;
		  break;
		default:
		  break;
		}
	      if (s != C_OMP_CLAUSE_SPLIT_COUNT)
		break;
	      /* Error-recovery here, invalid if-modifier specified, add the
		 clause to just one construct.  */
	      if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TARGET;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	      else if ((mask & (OMP_CLAUSE_MASK_1
				<< PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	      else if (code == OMP_SIMD)
		s = C_OMP_CLAUSE_SPLIT_SIMD;
	      else
		gcc_unreachable ();
	      break;
	    }
	  /* Otherwise, duplicate if clause to all constructs.  */
	  if (code == OMP_SIMD)
	    {
	      if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
			   | (OMP_CLAUSE_MASK_1
			      << PRAGMA_OMP_CLAUSE_NUM_THREADS)
			   | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
		  != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
		  cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
		}
	      else
		{
		  s = C_OMP_CLAUSE_SPLIT_SIMD;
		  break;
		}
	    }
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
	      != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
		  cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
	    }
	  else if ((mask & (OMP_CLAUSE_MASK_1
			    << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
	    {
	      if ((mask & (OMP_CLAUSE_MASK_1
			   << PRAGMA_OMP_CLAUSE_MAP)) != 0)
		{
		  c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
					OMP_CLAUSE_IF);
		  OMP_CLAUSE_IF_MODIFIER (c)
		    = OMP_CLAUSE_IF_MODIFIER (clauses);
		  OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
		  OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
		  cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
		  s = C_OMP_CLAUSE_SPLIT_PARALLEL;
		}
	      else
		s = C_OMP_CLAUSE_SPLIT_PARALLEL;
	    }
	  else
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  break;
	case OMP_CLAUSE_LINEAR:
	  /* Linear clause is allowed on simd and for.  Put it on the
	     innermost construct.  */
	  if (code == OMP_SIMD)
	    s = C_OMP_CLAUSE_SPLIT_SIMD;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	case OMP_CLAUSE_NOWAIT:
	  /* Nowait clause is allowed on target, for and sections, but
	     is not allowed on parallel for or parallel sections.  Therefore,
	     put it on target construct if present, because that can only
	     be combined with parallel for{, simd} and not with for{, simd},
	     otherwise to the worksharing construct.  */
	  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
	      != 0)
	    s = C_OMP_CLAUSE_SPLIT_TARGET;
	  else
	    s = C_OMP_CLAUSE_SPLIT_FOR;
	  break;
	default:
	  gcc_unreachable ();
	}
      /* Finally, move the clause itself onto the selected list.  */
      OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
      cclauses[s] = clauses;
    }

  if (!flag_checking)
    return;

  /* Sanity-check: a construct absent from the combination must have
     received no clauses.  */
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
  if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
  if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
	       | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
      && code != OMP_SECTIONS
      && code != OMP_LOOP)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
  if (code != OMP_SIMD)
    gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
}
/* qsort callback to compare #pragma omp declare simd clauses. */
static int
c_omp_declare_simd_clause_cmp (const void *p, const void *q)
{
tree a = *(const tree *) p;
tree b = *(const tree *) q;
if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
{
if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
return -1;
return 1;
}
if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
&& OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
&& OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
{
int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
if (c < d)
return 1;
if (c > d)
return -1;
}
return 0;
}
/* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
   CLAUSES on FNDECL into argument indexes and sort them.  */

tree
c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
{
  vec<tree> cvec = vNULL;

  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
	  && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
	{
	  /* Find the position of the clause's decl among the parms.  */
	  tree decl = OMP_CLAUSE_DECL (c);
	  tree p;
	  int pos = 0;
	  for (p = parms; p; p = TREE_CHAIN (p), pos++)
	    if (p == decl)
	      break;
	  if (p == NULL_TREE)
	    {
	      /* Erroneous clauses are dropped from the result.  */
	      error_at (OMP_CLAUSE_LOCATION (c),
			"%qD is not a function argument", decl);
	      continue;
	    }
	  OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, pos);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	    {
	      /* A variable stride refers to another argument; convert
		 that reference to an index too.  */
	      decl = OMP_CLAUSE_LINEAR_STEP (c);
	      pos = 0;
	      for (p = parms; p; p = TREE_CHAIN (p), pos++)
		if (p == decl)
		  break;
	      if (p == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%qD is not a function argument", decl);
		  continue;
		}
	      OMP_CLAUSE_LINEAR_STEP (c)
		= build_int_cst (integer_type_node, pos);
	    }
	}
      cvec.safe_push (c);
    }
  if (cvec.is_empty ())
    clauses = NULL_TREE;
  else
    {
      unsigned int len = cvec.length ();
      cvec.qsort (c_omp_declare_simd_clause_cmp);
      /* Relink the chain in sorted order.  */
      clauses = cvec[0];
      for (unsigned int i = 0; i < len; i++)
	OMP_CLAUSE_CHAIN (cvec[i]) = i + 1 < len ? cvec[i + 1] : NULL_TREE;
    }
  cvec.release ();
  return clauses;
}
/* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs.  */

void
c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
{
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMDLEN
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_INBRANCH
	  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_NOTINBRANCH)
	continue;

      /* Map the stored index back to the corresponding PARM_DECL.  */
      int idx = tree_to_shwi (OMP_CLAUSE_DECL (c));
      tree arg = DECL_ARGUMENTS (fndecl);
      for (int j = 0; arg; arg = TREE_CHAIN (arg), j++)
	if (j == idx)
	  break;
      gcc_assert (arg);
      OMP_CLAUSE_DECL (c) = arg;
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	  && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
	{
	  /* Also restore a variable linear step stored as an index.  */
	  idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
	  arg = DECL_ARGUMENTS (fndecl);
	  for (int j = 0; arg; arg = TREE_CHAIN (arg), j++)
	    if (j == idx)
	      break;
	  gcc_assert (arg);
	  OMP_CLAUSE_LINEAR_STEP (c) = arg;
	}
    }
}
/* Return true for __func__ and similar function-local predefined
variables (which are in OpenMP predetermined shared, allowed in
shared/firstprivate clauses). */
bool
c_omp_predefined_variable (tree decl)
{
  /* The function-local predefined variables are artificial, read-only,
     static VAR_DECLs named __func__, __FUNCTION__ or
     __PRETTY_FUNCTION__.  */
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !TREE_READONLY (decl)
      || !TREE_STATIC (decl)
      || DECL_NAME (decl) == NULL_TREE)
    return false;
  tree name = DECL_NAME (decl);
  return (name == ridpointers[RID_C99_FUNCTION_NAME]
	  || name == ridpointers[RID_FUNCTION_NAME]
	  || name == ridpointers[RID_PRETTY_FUNCTION_NAME]);
}
/* True if OpenMP sharing attribute of DECL is predetermined. */
enum omp_clause_default_kind
c_omp_predetermined_sharing (tree decl)
{
  /* Predetermine artificial variables holding integral values; those
     are usually the result of gimplify_one_sizepos or SAVE_EXPR
     gimplification.  */
  bool artificial_integral
    = (VAR_P (decl)
       && DECL_ARTIFICIAL (decl)
       && INTEGRAL_TYPE_P (TREE_TYPE (decl)));
  /* __func__ and friends are likewise predetermined shared.  */
  if (artificial_integral || c_omp_predefined_variable (decl))
    return OMP_CLAUSE_DEFAULT_SHARED;
  return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
}
/* Diagnose errors in an OpenMP context selector, return CTX if
it is correct or error_mark_node otherwise. */
tree
c_omp_check_context_selector (location_t loc, tree ctx)
{
/* CTX is a TREE_LIST of selector sets: TREE_PURPOSE is the set name,
TREE_VALUE the list of selectors in that set.  */
/* Each trait-set-selector-name can only be specified once.
There are just 4 set names, so the quadratic scan is cheap.  */
for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
{
error_at (loc, "selector set %qs specified more than once",
IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
return error_mark_node;
}
for (tree t = ctx; t; t = TREE_CHAIN (t))
{
/* Each trait-selector-name can only be specified once.  Quadratic
check for short lists, hash set for longer ones.  */
if (list_length (TREE_VALUE (t)) < 5)
{
for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
{
error_at (loc,
"selector %qs specified more than once in set %qs",
IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
IDENTIFIER_POINTER (TREE_PURPOSE (t)));
return error_mark_node;
}
}
else
{
hash_set<tree> pset;
for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
if (pset.add (TREE_PURPOSE (t1)))
{
error_at (loc,
"selector %qs specified more than once in set %qs",
IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
IDENTIFIER_POINTER (TREE_PURPOSE (t)));
return error_mark_node;
}
}
/* Tables of known property names for specific selectors; each table
is NULL-terminated.  The empty "extension" table means no property
of that selector is recognized.  */
static const char *const kind[] = {
"host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
static const char *const vendor[] = {
"amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
"llvm", "nvidia", "pgi", "ti", "unknown", NULL };
static const char *const extension[] = { NULL };
static const char *const atomic_default_mem_order[] = {
"seq_cst", "relaxed", "acq_rel", NULL };
struct known_properties { const char *set; const char *selector;
const char *const *props; };
known_properties props[] = {
{ "device", "kind", kind },
{ "implementation", "vendor", vendor },
{ "implementation", "extension", extension },
{ "implementation", "atomic_default_mem_order",
atomic_default_mem_order } };
/* Validate each property of each known (set, selector) pair against
its table.  */
for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
props[i].selector)
&& !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
props[i].set))
for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
for (unsigned j = 0; ; j++)
{
if (props[i].props[j] == NULL)
{
/* Reached the end of the table without a match.  The
" score" purpose (note the leading space, which cannot
collide with a user identifier) marks a score clause,
not a property, so it is silently accepted.  */
if (TREE_PURPOSE (t2)
&& !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
" score"))
break;
if (props[i].props == atomic_default_mem_order)
{
/* atomic_default_mem_order accepts only the listed
values; anything else is a hard error rather than
a warning.  */
error_at (loc,
"incorrect property %qs of %qs selector",
IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
"atomic_default_mem_order");
return error_mark_node;
}
else if (TREE_PURPOSE (t2))
warning_at (loc, 0,
"unknown property %qs of %qs selector",
IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
props[i].selector);
else
warning_at (loc, 0,
"unknown property %qE of %qs selector",
TREE_VALUE (t2), props[i].selector);
break;
}
else if (TREE_PURPOSE (t2) == NULL_TREE)
{
/* String-literal property: match contents and require the
string's length to be exactly strlen + NUL (rejects
embedded NULs).  */
const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
if (!strcmp (str, props[i].props[j])
&& ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
== strlen (str) + 1))
break;
}
else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
props[i].props[j]))
break;
}
}
return ctx;
}
/* Register VARIANT as variant of some base function marked with
#pragma omp declare variant. CONSTRUCT is corresponding construct
selector set. */
void
c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
{
  tree attr = lookup_attribute ("omp declare variant variant",
				DECL_ATTRIBUTES (variant));
  if (attr == NULL_TREE)
    {
      /* First registration of this variant: remember its construct
	 selector set on the decl.  */
      DECL_ATTRIBUTES (variant)
	= tree_cons (get_identifier ("omp declare variant variant"),
		     unshare_expr (construct),
		     DECL_ATTRIBUTES (variant));
      return;
    }
  /* Already registered: the stored and the new construct selector sets
     must both be absent or both present and equal.  */
  bool mismatch;
  if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE))
    mismatch = true;
  else
    mismatch = (construct != NULL_TREE
		&& omp_context_selector_set_compare ("construct",
						     TREE_VALUE (attr),
						     construct) != 0);
  if (mismatch)
    error_at (loc, "%qD used as a variant with incompatible %<construct%> "
	      "selector sets", variant);
}
/* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
to distinguish clauses as seen by the user. Return the "friendly" clause
name for error messages etc., where possible. See also
c/c-parser.c:c_parser_oacc_data_clause and
cp/parser.c:cp_parser_oacc_data_clause. */
const char *
c_omp_map_clause_name (tree clause, bool oacc)
{
  /* Only OpenACC map clauses have "friendly" user-visible names; for
     everything else fall back to the generic clause-name table.  */
  if (!oacc || OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_MAP)
    return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
  switch (OMP_CLAUSE_MAP_KIND (clause))
    {
    case GOMP_MAP_FORCE_ALLOC:
    case GOMP_MAP_ALLOC: return "create";
    case GOMP_MAP_FORCE_TO:
    case GOMP_MAP_TO: return "copyin";
    case GOMP_MAP_FORCE_FROM:
    case GOMP_MAP_FROM: return "copyout";
    case GOMP_MAP_FORCE_TOFROM:
    case GOMP_MAP_TOFROM: return "copy";
    case GOMP_MAP_RELEASE: return "delete";
    case GOMP_MAP_FORCE_PRESENT: return "present";
    case GOMP_MAP_ATTACH: return "attach";
    case GOMP_MAP_FORCE_DETACH:
    case GOMP_MAP_DETACH: return "detach";
    case GOMP_MAP_DEVICE_RESIDENT: return "device_resident";
    case GOMP_MAP_LINK: return "link";
    case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr";
    default: return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
    }
}
|
scalprod.c | #include <omp.h>
#ifdef __cplusplus
extern "C"
#endif
/* Store the dot product of the n-element vectors x and y in *res.
   The accumulation is parallelized with an OpenMP sum reduction, so
   the result is exact up to floating-point reassociation.  */
void
scalprod(int n, double* x, double* y, double* res)
{
  double acc = 0.;
  int i;
#pragma omp parallel for reduction(+ : acc)
  for (i = 0; i < n; ++i)
    acc += x[i] * y[i];
  *res = acc;
}
|
GB_binop__isgt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__isgt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isgt_uint16)
// A*D function (colscale): GB (_AxD__isgt_uint16)
// D*A function (rowscale): GB (_DxB__isgt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__isgt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__isgt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isgt_uint16)
// C=scalar+B GB (_bind1st__isgt_uint16)
// C=scalar+B' GB (_bind1st_tran__isgt_uint16)
// C=A+scalar GB (_bind2nd__isgt_uint16)
// C=A'+scalar GB (_bind2nd_tran__isgt_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGT || GxB_NO_UINT16 || GxB_NO_ISGT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A+B with all three matrices dense; the loop comes from the shared
// template, specialized by the GB_* macros defined above (cij = aij > bij).
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// C += B: accumulate sparse B into dense C, using the precomputed
// entry/vector slicing of B to balance work across threads.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isgt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable; kept by the code generator after the block above returns
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D: scale each column j of A by the diagonal entry D(j,j);
// Cx is typed here, the rest comes from the colscale template.
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B: scale each row i of B by the diagonal entry D(i,i);
// Cx is typed here, the rest comes from the rowscale template.
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isgt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseAdd C = A+B (optionally masked): declare slicing workspaces for
// M, A, and B; the template allocates/uses them and GB_FREE_WORK frees them.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isgt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C = A.*B (optionally masked), general method 01 template
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C<#> = A.*B where A is sparse/hyper and B is bitmap/full.
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult C<M> = A.*B with M sparse/hyper and A, B bitmap/full;
// work is sliced over the entries of the mask M.
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isgt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// eWiseMult where the result C is held in bitmap form, with an
// optional (possibly complemented) mask M.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (x > Bx [p]) for each of the anz entries present in B
    uint16_t *Cx = (uint16_t *) Cx_output ;
    const uint16_t x = (*((uint16_t *) x_input)) ;
    const uint16_t *Bx = (const uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (GBB (Bb, p))
        {
            Cx [p] = (x > Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isgt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [p] = (Ax [p] > y) for each of the anz entries present in A
    uint16_t *Cx = (uint16_t *) Cx_output ;
    const uint16_t *Ax = (const uint16_t *) Ax_input ;
    const uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] > y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__isgt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// C = op (x, A'): transpose A while applying cij = (x > aij) via the
// GB_CAST_OP macro defined just above.
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for code after this function (same type here)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__isgt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = op (A', y): transpose A while applying cij = (aij > y) via the
// GB_CAST_OP macro defined just above.
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fourier.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF OOO U U RRRR IIIII EEEEE RRRR %
% F O O U U R R I E R R %
% FFF O O U U RRRR I EEE RRRR %
% F O O U U R R I E R R %
% F OOO UUU R R IIIII EEEEE R R %
% %
% %
% MagickCore Discrete Fourier Transform Methods %
% %
% Software Design %
% Sean Burke %
% Fred Weinhaus %
% John Cristy %
% July 2009 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/cache.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/fourier.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#if defined(MAGICKCORE_FFTW_DELEGATE)
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
#include <complex.h>
#endif
#include <fftw3.h>
#if !defined(MAGICKCORE_HAVE_CABS)
#define cabs(z) (sqrt(z[0]*z[0]+z[1]*z[1]))
#endif
#if !defined(MAGICKCORE_HAVE_CARG)
#define carg(z) (atan2(cimag(z),creal(z)))
#endif
#if !defined(MAGICKCORE_HAVE_CIMAG)
#define cimag(z) (z[1])
#endif
#if !defined(MAGICKCORE_HAVE_CREAL)
#define creal(z) (z[0])
#endif
#endif
/*
Typedef declarations.
*/
typedef struct _FourierInfo
{
ChannelType
channel;
MagickBooleanType
modulus;
size_t
width,
height;
ssize_t
center;
} FourierInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p l e x I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ComplexImages() performs complex mathematics on an image sequence.
%
% The format of the ComplexImages method is:
%
% MagickBooleanType ComplexImages(Image *images,
% const ComplexOperator operator,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o operator: A complex operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ComplexImages(const Image *images,
const ComplexOperator operator,ExceptionInfo *exception)
{
#define ComplexImageTag "Complex/Image"
CacheView
*Ai_view,
*Ar_view,
*Bi_view,
*Br_view,
*Ci_view,
*Cr_view;
const char
*artifact;
const Image
*Ai_image,
*Ar_image,
*Bi_image,
*Br_image;
double
snr;
Image
*Ci_image,
*complex_images,
*Cr_image,
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
/* At least two images (real and imaginary parts) are required.  */
if (images->next == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),ImageError,
"ImageSequenceRequired","`%s'",images->filename);
return((Image *) NULL);
}
/* Result is a two-image list: real (Cr) and imaginary (Ci) parts.  */
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
image->storage_class=DirectClass;
image->depth=32UL;
complex_images=NewImageList();
AppendImageToList(&complex_images,image);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
{
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
image->storage_class=DirectClass;
image->depth=32UL;
AppendImageToList(&complex_images,image);
/*
Apply complex mathematics to image pixels.
*/
/* Optional noise-to-signal bias for DivideComplexOperator, taken from
the "complex:snr" artifact.  */
artifact=GetImageArtifact(image,"complex:snr");
snr=0.0;
if (artifact != (const char *) NULL)
snr=StringToDouble(artifact,(char **) NULL);
/* A = first operand (images 1,2), B = second operand (images 3,4 when
present, otherwise B aliases A).  */
Ar_image=images;
Ai_image=images->next;
Br_image=images;
Bi_image=images->next;
if ((images->next->next != (Image *) NULL) &&
(images->next->next->next != (Image *) NULL))
{
Br_image=images->next->next;
Bi_image=images->next->next->next;
}
Cr_image=complex_images;
Ci_image=complex_images->next;
Ar_view=AcquireVirtualCacheView(Ar_image,exception);
Ai_view=AcquireVirtualCacheView(Ai_image,exception);
Br_view=AcquireVirtualCacheView(Br_image,exception);
Bi_view=AcquireVirtualCacheView(Bi_image,exception);
Cr_view=AcquireAuthenticCacheView(Cr_image,exception);
Ci_view=AcquireAuthenticCacheView(Ci_image,exception);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(images,complex_images,images->rows,1)
#endif
for (y=0; y < (ssize_t) images->rows; y++)
{
register const PixelPacket
*restrict Ai,
*restrict Ar,
*restrict Bi,
*restrict Br;
register PixelPacket
*restrict Ci,
*restrict Cr;
register ssize_t
x;
/* On any prior failure, skip remaining rows (cannot break out of an
OpenMP loop).  */
if (status == MagickFalse)
continue;
Ar=GetCacheViewVirtualPixels(Ar_view,0,y,images->columns,1,exception);
Ai=GetCacheViewVirtualPixels(Ai_view,0,y,images->columns,1,exception);
Br=GetCacheViewVirtualPixels(Br_view,0,y,images->columns,1,exception);
Bi=GetCacheViewVirtualPixels(Bi_view,0,y,images->columns,1,exception);
Cr=QueueCacheViewAuthenticPixels(Cr_view,0,y,images->columns,1,exception);
Ci=QueueCacheViewAuthenticPixels(Ci_view,0,y,images->columns,1,exception);
if ((Ar == (const PixelPacket *) NULL) ||
(Ai == (const PixelPacket *) NULL) ||
(Br == (const PixelPacket *) NULL) ||
(Bi == (const PixelPacket *) NULL) ||
(Cr == (PixelPacket *) NULL) || (Ci == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) images->columns; x++)
{
switch (operator)
{
case AddComplexOperator:
{
/* C = A + B, channel by channel.  */
Cr->red=Ar->red+Br->red;
Ci->red=Ai->red+Bi->red;
Cr->green=Ar->green+Br->green;
Ci->green=Ai->green+Bi->green;
Cr->blue=Ar->blue+Br->blue;
Ci->blue=Ai->blue+Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity+Br->opacity;
Ci->opacity=Ai->opacity+Bi->opacity;
}
break;
}
case ConjugateComplexOperator:
default:
{
/* C = conj(A): real part copied, imaginary negated.  NOTE(review):
this negates Bi, which aliases Ai only for two-image sequences;
for four-image input the conjugate of B, not A, is produced --
confirm against the intended semantics.  */
Cr->red=Ar->red;
Ci->red=(-Bi->red);
Cr->green=Ar->green;
Ci->green=(-Bi->green);
Cr->blue=Ar->blue;
Ci->blue=(-Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity;
Ci->opacity=(-Bi->opacity);
}
break;
}
case DivideComplexOperator:
{
/* C = A / B using gamma = 1/(|B|^2 + snr); snr regularizes
division by near-zero magnitudes.  */
double
gamma;
gamma=PerceptibleReciprocal(Br->red*Br->red+Bi->red*Bi->red+snr);
Cr->red=gamma*(Ar->red*Br->red+Ai->red*Bi->red);
Ci->red=gamma*(Ai->red*Br->red-Ar->red*Bi->red);
gamma=PerceptibleReciprocal(Br->green*Br->green+Bi->green*Bi->green+
snr);
Cr->green=gamma*(Ar->green*Br->green+Ai->green*Bi->green);
Ci->green=gamma*(Ai->green*Br->green-Ar->green*Bi->green);
gamma=PerceptibleReciprocal(Br->blue*Br->blue+Bi->blue*Bi->blue+snr);
Cr->blue=gamma*(Ar->blue*Br->blue+Ai->blue*Bi->blue);
Ci->blue=gamma*(Ai->blue*Br->blue-Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
gamma=PerceptibleReciprocal(Br->opacity*Br->opacity+Bi->opacity*
Bi->opacity+snr);
Cr->opacity=gamma*(Ar->opacity*Br->opacity+Ai->opacity*
Bi->opacity);
Ci->opacity=gamma*(Ai->opacity*Br->opacity-Ar->opacity*
Bi->opacity);
}
break;
}
case MagnitudePhaseComplexOperator:
{
/* Cr = |A|; Ci = phase of A scaled from [-pi,pi] into [0,1].  */
Cr->red=sqrt(Ar->red*Ar->red+Ai->red*Ai->red);
Ci->red=atan2(Ai->red,Ar->red)/(2.0*MagickPI)+0.5;
Cr->green=sqrt(Ar->green*Ar->green+Ai->green*Ai->green);
Ci->green=atan2(Ai->green,Ar->green)/(2.0*MagickPI)+0.5;
Cr->blue=sqrt(Ar->blue*Ar->blue+Ai->blue*Ai->blue);
Ci->blue=atan2(Ai->blue,Ar->blue)/(2.0*MagickPI)+0.5;
if (images->matte != MagickFalse)
{
Cr->opacity=sqrt(Ar->opacity*Ar->opacity+Ai->opacity*Ai->opacity);
Ci->opacity=atan2(Ai->opacity,Ar->opacity)/(2.0*MagickPI)+0.5;
}
break;
}
case MultiplyComplexOperator:
{
/* C = A * B, scaled by QuantumScale to stay in quantum range.  */
Cr->red=QuantumScale*(Ar->red*Br->red-Ai->red*Bi->red);
Ci->red=QuantumScale*(Ai->red*Br->red+Ar->red*Bi->red);
Cr->green=QuantumScale*(Ar->green*Br->green-Ai->green*Bi->green);
Ci->green=QuantumScale*(Ai->green*Br->green+Ar->green*Bi->green);
Cr->blue=QuantumScale*(Ar->blue*Br->blue-Ai->blue*Bi->blue);
Ci->blue=QuantumScale*(Ai->blue*Br->blue+Ar->blue*Bi->blue);
if (images->matte != MagickFalse)
{
Cr->opacity=QuantumScale*(Ar->opacity*Br->opacity-Ai->opacity*
Bi->opacity);
Ci->opacity=QuantumScale*(Ai->opacity*Br->opacity+Ar->opacity*
Bi->opacity);
}
break;
}
case RealImaginaryComplexOperator:
{
/* Inverse of MagnitudePhase: A holds (magnitude, scaled phase);
recover real/imaginary via cos/sin of the unscaled phase.  */
Cr->red=Ar->red*cos(2.0*MagickPI*(Ai->red-0.5));
Ci->red=Ar->red*sin(2.0*MagickPI*(Ai->red-0.5));
Cr->green=Ar->green*cos(2.0*MagickPI*(Ai->green-0.5));
Ci->green=Ar->green*sin(2.0*MagickPI*(Ai->green-0.5));
Cr->blue=Ar->blue*cos(2.0*MagickPI*(Ai->blue-0.5));
Ci->blue=Ar->blue*sin(2.0*MagickPI*(Ai->blue-0.5));
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity*cos(2.0*MagickPI*(Ai->opacity-0.5));
Ci->opacity=Ar->opacity*sin(2.0*MagickPI*(Ai->opacity-0.5));
}
break;
}
case SubtractComplexOperator:
{
/* C = A - B, channel by channel.  */
Cr->red=Ar->red-Br->red;
Ci->red=Ai->red-Bi->red;
Cr->green=Ar->green-Br->green;
Ci->green=Ai->green-Bi->green;
Cr->blue=Ar->blue-Br->blue;
Ci->blue=Ai->blue-Bi->blue;
if (images->matte != MagickFalse)
{
Cr->opacity=Ar->opacity-Br->opacity;
Ci->opacity=Ai->opacity-Bi->opacity;
}
break;
}
}
Ar++;
Ai++;
Br++;
Bi++;
Cr++;
Ci++;
}
if (SyncCacheViewAuthenticPixels(Ci_view,exception) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(Cr_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Serialize progress updates across threads.  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ComplexImages)
#endif
proceed=SetImageProgress(images,ComplexImageTag,progress++,
images->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
Cr_view=DestroyCacheView(Cr_view);
Ci_view=DestroyCacheView(Ci_view);
Br_view=DestroyCacheView(Br_view);
Bi_view=DestroyCacheView(Bi_view);
Ar_view=DestroyCacheView(Ar_view);
Ai_view=DestroyCacheView(Ai_view);
if (status == MagickFalse)
complex_images=DestroyImageList(complex_images);
return(complex_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r w a r d F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ForwardFourierTransformImage() implements the discrete Fourier transform
% (DFT) of the image either as a magnitude / phase or real / imaginary image
% pair.
%
% The format of the ForwadFourierTransformImage method is:
%
% Image *ForwardFourierTransformImage(const Image *image,
% const MagickBooleanType modulus,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulus: if true, return as transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
Circularly shift ROLL_PIXELS (a height x width double buffer) by
(x_offset,y_offset) in place, wrapping at the edges.  Returns MagickFalse
only if the scratch buffer cannot be allocated.
*/
static MagickBooleanType RollFourier(const size_t width,const size_t height,
const ssize_t x_offset,const ssize_t y_offset,double *roll_pixels)
{
double
*source_pixels;
MemoryInfo
*source_info;
register ssize_t
i,
x;
ssize_t
u,
v,
y;
/*
Move zero frequency (DC, average color) from (0,0) to (width/2,height/2).
*/
source_info=AcquireVirtualMemory(height,width*sizeof(*source_pixels));
if (source_info == (MemoryInfo *) NULL)
return(MagickFalse);
source_pixels=(double *) GetVirtualMemoryBlob(source_info);
i=0L;
for (y=0L; y < (ssize_t) height; y++)
{
/* Destination row v = (y+y_offset) wrapped into [0, height).  */
if (y_offset < 0L)
v=((y+y_offset) < 0L) ? y+y_offset+(ssize_t) height : y+y_offset;
else
v=((y+y_offset) > ((ssize_t) height-1L)) ? y+y_offset-(ssize_t) height :
y+y_offset;
for (x=0L; x < (ssize_t) width; x++)
{
/* Destination column u = (x+x_offset) wrapped into [0, width).  */
if (x_offset < 0L)
u=((x+x_offset) < 0L) ? x+x_offset+(ssize_t) width : x+x_offset;
else
u=((x+x_offset) > ((ssize_t) width-1L)) ? x+x_offset-(ssize_t) width :
x+x_offset;
source_pixels[v*width+u]=roll_pixels[i++];
}
}
/* Copy the rolled scratch buffer back over the caller's pixels.  */
(void) CopyMagickMemory(roll_pixels,source_pixels,height*width*
sizeof(*source_pixels));
source_info=RelinquishVirtualMemory(source_info);
return(MagickTrue);
}
/*
  ForwardQuadrantSwap() rearranges the half-plane FFT output (source_pixels,
  height rows of `center' = width/2+1 columns) into a full width-by-height
  display layout (forward_pixels) with the DC component centered.  The
  missing half is filled in using the conjugate symmetry of a real-input
  transform: row y maps to row height-y, mirrored in x.
*/
static MagickBooleanType ForwardQuadrantSwap(const size_t width,
  const size_t height,double *source_pixels,double *forward_pixels)
{
  MagickBooleanType
    status;

  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;
  /* Roll the half-plane vertically by height/2 so DC moves to the middle
     row; source_pixels is modified in place. */
  status=RollFourier((size_t) center,height,0L,(ssize_t) height/2L,
    source_pixels);
  if (status == MagickFalse)
    return(MagickFalse);
  /* Copy the stored half into the right half of the output plane. */
  for (y=0L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      forward_pixels[y*width+x+width/2L]=source_pixels[y*center+x];
  /* Reconstruct the left half by symmetry: (x,y) -> (width-x,height-y). */
  for (y=1; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L-1L); x++)
      forward_pixels[(height-y)*width+width/2L-x-1L]=
        source_pixels[y*center+x+1L];
  /* Fill the remaining top row (y=0 has no height-y partner) by mirroring
     the first output row about the center column.  Note -x+width/2L-1L stays
     non-negative because x < width/2. */
  for (x=0L; x < (ssize_t) (width/2L); x++)
    forward_pixels[-x+width/2L-1L]=forward_pixels[x+width/2L+1L];
  return(MagickTrue);
}
/*
  CorrectPhaseLHS() negates the left half (columns 0 .. width/2-1) of every
  row of the phase plane.
*/
static void CorrectPhaseLHS(const size_t width,const size_t height,
  double *fourier_pixels)
{
  register ssize_t
    column;

  ssize_t
    row;

  for (row=0L; row < (ssize_t) height; row++)
  {
    double
      *p;

    p=fourier_pixels+row*width;
    for (column=0L; column < (ssize_t) (width/2L); column++)
      p[column]=(-p[column]);
  }
}
/*
  ForwardFourier() writes the magnitude/phase (or real/imaginary) pixel
  buffers produced by ForwardFourierTransform() into the two images of the
  output list: the first image receives `magnitude', the second `phase',
  for the single channel selected in fourier_info->channel.  Both planes are
  quadrant-swapped so the DC term is centered; if modulus is set the phase
  is rescaled from [-pi,pi] to [0,1] for display.
*/
static MagickBooleanType ForwardFourier(const FourierInfo *fourier_info,
  Image *image,double *magnitude,double *phase,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *magnitude_pixels,
    *phase_pixels;

  Image
    *magnitude_image,
    *phase_image;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    x;

  ssize_t
    i,
    y;

  /* The caller must supply a two-image list: magnitude first, phase second. */
  magnitude_image=GetFirstImageInList(image);
  phase_image=GetNextImageInList(image);
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create "Fourier Transform" image from constituent arrays.
  */
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      /* Release whichever allocation succeeded before bailing out. */
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  (void) ResetMagickMemory(magnitude_pixels,0,fourier_info->height*
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  (void) ResetMagickMemory(phase_pixels,0,fourier_info->height*
    fourier_info->width*sizeof(*phase_pixels));
  /* Expand each half-plane buffer into a centered full plane. */
  status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude,magnitude_pixels);
  if (status != MagickFalse)
    status=ForwardQuadrantSwap(fourier_info->width,fourier_info->height,phase,
      phase_pixels);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (fourier_info->modulus != MagickFalse)
    {
      /* Map phase from [-pi,pi] to [0,1] so it can be stored as intensity. */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]/=(2.0*MagickPI);
          phase_pixels[i]+=0.5;
          i++;
        }
    }
  /*
    Write the magnitude plane into the selected channel of the first image.
  */
  magnitude_view=AcquireAuthenticCacheView(magnitude_image,exception);
  i=0L;
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    /* NOTE(review): `height' is passed as the column count here; the planes
       are forced square upstream (width == height) so this is equivalent to
       `width' — confirm if non-square support is ever added. */
    q=GetCacheViewAuthenticPixels(magnitude_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
            magnitude_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*magnitude_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(magnitude_view,exception);
    if (status == MagickFalse)
      break;
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /*
    Write the phase plane into the selected channel of the second image.
  */
  i=0L;
  phase_view=AcquireAuthenticCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    q=GetCacheViewAuthenticPixels(phase_view,0L,y,fourier_info->height,1UL,
      exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          SetPixelRed(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GreenChannel:
        {
          SetPixelGreen(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case BlueChannel:
        {
          SetPixelBlue(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case OpacityChannel:
        {
          SetPixelOpacity(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case IndexChannel:
        {
          SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
        case GrayChannels:
        {
          SetPixelGray(q,ClampToQuantum(QuantumRange*phase_pixels[i]));
          break;
        }
      }
      i++;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(phase_view,exception);
    if (status == MagickFalse)
      break;
  }
  phase_view=DestroyCacheView(phase_view);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
/*
  ForwardFourierTransform() extracts one channel of `image' into a real
  array, runs an FFTW real-to-complex 2-D transform on it, optionally
  normalizes by 1/(width*height), and emits either magnitude/phase or
  real/imaginary half-plane buffers (height rows of `center' complex bins).
*/
static MagickBooleanType ForwardFourierTransform(FourierInfo *fourier_info,
  const Image *image,double *magnitude_pixels,double *phase_pixels,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *value;

  double
    *source_pixels;

  fftw_complex
    *forward_pixels;

  fftw_plan
    fftw_r2c_plan;

  MemoryInfo
    *forward_info,
    *source_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Generate the forward Fourier transform.
  */
  source_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  /* Zero first: the transform extent may exceed the image extent (padding). */
  ResetMagickMemory(source_pixels,0,fourier_info->height*fourier_info->width*
    sizeof(*source_pixels));
  i=0L;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      /* Copy the selected channel, scaled to [0,1]. */
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          source_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          source_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          source_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Half-plane complex output: height rows of `center' = width/2+1 bins. */
  forward_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*forward_pixels));
  if (forward_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
      return(MagickFalse);
    }
  forward_pixels=(fftw_complex *) GetVirtualMemoryBlob(forward_info);
  /* FFTW planner is not thread-safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_ForwardFourierTransform)
#endif
  /* NOTE(review): FFTW expects (n0,n1) = (rows,cols); width is passed first,
     which only matches because the transform is forced square upstream. */
  fftw_r2c_plan=fftw_plan_dft_r2c_2d(fourier_info->width,fourier_info->height,
    source_pixels,forward_pixels,FFTW_ESTIMATE);
  fftw_execute(fftw_r2c_plan);
  fftw_destroy_plan(fftw_r2c_plan);
  source_info=(MemoryInfo *) RelinquishVirtualMemory(source_info);
  /* Default normalization is on the forward transform unless the
     "fourier:normalize" artifact says otherwise. */
  value=GetImageArtifact(image,"fourier:normalize");
  if ((value == (const char *) NULL) || (LocaleCompare(value,"forward") == 0))
    {
      double
        gamma;

      /*
        Normalize inverse transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          forward_pixels[i]*=gamma;
#else
          forward_pixels[i][0]*=gamma;
          forward_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /*
    Generate magnitude and phase (or real and imaginary).
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=cabs(forward_pixels[i]);
        phase_pixels[i]=carg(forward_pixels[i]);
        i++;
      }
  else
    /* modulus == MagickFalse: "magnitude" holds the real part and "phase"
       holds the imaginary part. */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
        magnitude_pixels[i]=creal(forward_pixels[i]);
        phase_pixels[i]=cimag(forward_pixels[i]);
        i++;
      }
  forward_info=(MemoryInfo *) RelinquishVirtualMemory(forward_info);
  return(MagickTrue);
}
/*
  ForwardFourierTransformChannel() computes the forward DFT of a single
  channel: it derives the (square, even) transform geometry, allocates the
  half-plane magnitude and phase buffers, runs ForwardFourierTransform(),
  and writes the result into `fourier_image' via ForwardFourier().
  Returns MagickTrue on success.
*/
static MagickBooleanType ForwardFourierTransformChannel(const Image *image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  double
    *magnitude_pixels,
    *phase_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *magnitude_info,
    *phase_info;

  size_t
    extent;

  /* Pad to a square with even sides so the quadrant swap is well-defined. */
  fourier_info.width=image->columns;
  fourier_info.height=image->rows;
  if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
      ((image->rows % 2) != 0))
    {
      extent=image->columns < image->rows ? image->rows : image->columns;
      fourier_info.width=(extent & 0x01) == 1 ? extent+1UL : extent;
    }
  fourier_info.height=fourier_info.width;
  /* Number of complex bins per row of the real-to-complex half plane. */
  fourier_info.center=(ssize_t) floor((double) fourier_info.width/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  magnitude_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*phase_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL))
    {
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      /* Fix: was `== NULL', which leaked the magnitude buffer (and passed
         NULL to RelinquishVirtualMemory) when only the phase allocation
         failed. */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  status=ForwardFourierTransform(&fourier_info,image,magnitude_pixels,
    phase_pixels,exception);
  if (status != MagickFalse)
    status=ForwardFourier(&fourier_info,fourier_image,magnitude_pixels,
      phase_pixels,exception);
  phase_info=RelinquishVirtualMemory(phase_info);
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  return(status);
}
#endif
/*
  ForwardFourierTransformImage() returns a two-image list (magnitude then
  phase, or real then imaginary when modulus is false), or NULL on failure.
  Without FFTW support it only raises a missing-delegate warning.
*/
MagickExport Image *ForwardFourierTransformImage(const Image *image,
  const MagickBooleanType modulus,ExceptionInfo *exception)
{
  Image
    *fourier_image;

  fourier_image=NewImageList();
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    image->filename);
#else
  {
    Image
      *magnitude_image;

    size_t
      extent,
      height,
      width;

    /* Pad the output geometry to an even-sided square (same rule as
       ForwardFourierTransformChannel). */
    width=image->columns;
    height=image->rows;
    if ((image->columns != image->rows) || ((image->columns % 2) != 0) ||
        ((image->rows % 2) != 0))
      {
        extent=image->columns < image->rows ? image->rows : image->columns;
        width=(extent & 0x01) == 1 ? extent+1UL : extent;
      }
    height=width;
    magnitude_image=CloneImage(image,width,height,MagickTrue,exception);
    if (magnitude_image != (Image *) NULL)
      {
        Image
          *phase_image;

        magnitude_image->storage_class=DirectClass;
        magnitude_image->depth=32UL;
        phase_image=CloneImage(image,width,height,MagickTrue,exception);
        if (phase_image == (Image *) NULL)
          magnitude_image=DestroyImage(magnitude_image);
        else
          {
            MagickBooleanType
              is_gray,
              status;

            phase_image->storage_class=DirectClass;
            phase_image->depth=32UL;
            AppendImageToList(&fourier_image,magnitude_image);
            AppendImageToList(&fourier_image,phase_image);
            status=MagickTrue;
            is_gray=IsGrayImage(image,exception);
            /* Transform each channel in its own OpenMP section; gray images
               need only a single (gray) transform. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel sections
#endif
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                if (is_gray != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GrayChannels,modulus,fourier_image,exception);
                else
                  thread_status=ForwardFourierTransformChannel(image,RedChannel,
                    modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    GreenChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (is_gray == MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    BlueChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->matte != MagickFalse)
                  thread_status=ForwardFourierTransformChannel(image,
                    OpacityChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
              #pragma omp section
#endif
              {
                MagickBooleanType
                  thread_status;

                thread_status=MagickTrue;
                if (image->colorspace == CMYKColorspace)
                  thread_status=ForwardFourierTransformChannel(image,
                    IndexChannel,modulus,fourier_image,exception);
                if (thread_status == MagickFalse)
                  status=thread_status;
              }
            }
            /* Any failed channel invalidates the whole result. */
            if (status == MagickFalse)
              fourier_image=DestroyImageList(fourier_image);
            fftw_cleanup();
          }
      }
  }
#endif
  return(fourier_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v e r s e F o u r i e r T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InverseFourierTransformImage() implements the inverse discrete Fourier
% transform (DFT) of the image either as a magnitude / phase or real /
% imaginary image pair.
%
% The format of the InverseFourierTransformImage method is:
%
% Image *InverseFourierTransformImage(const Image *magnitude_image,
% const Image *phase_image,const MagickBooleanType modulus,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o magnitude_image: the magnitude or real image.
%
% o phase_image: the phase or imaginary image.
%
% o modulus: if true, return transform as a magnitude / phase pair
% otherwise a real / imaginary image pair.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_FFTW_DELEGATE)
/*
  InverseQuadrantSwap() is the inverse of ForwardQuadrantSwap(): it folds a
  full width-by-height display-layout plane (`source', DC centered) back
  into the half-plane layout FFTW expects (`destination', height rows of
  `center' = width/2+1 columns), then rolls it vertically by -height/2 to
  move DC back to the origin.
*/
static MagickBooleanType InverseQuadrantSwap(const size_t width,
  const size_t height,const double *source,double *destination)
{
  register ssize_t
    x;

  ssize_t
    center,
    y;

  /*
    Swap quadrants.
  */
  center=(ssize_t) floor((double) width/2L)+1L;
  /* Fold the right half back, mirrored in both axes. */
  for (y=1L; y < (ssize_t) height; y++)
    for (x=0L; x < (ssize_t) (width/2L+1L); x++)
      destination[(height-y)*center-x+width/2L]=source[y*width+x];
  /* First column of each half-plane row comes from the center column. */
  for (y=0L; y < (ssize_t) height; y++)
    destination[y*center]=source[y*width+width/2L];
  /* Top row: mirror about the center column. */
  for (x=0L; x < center; x++)
    destination[x]=source[center-x-1L];
  return(RollFourier(center,height,0L,(ssize_t) height/-2L,destination));
}
/*
  InverseFourier() reads the magnitude and phase (or real and imaginary)
  images back into double arrays, undoes the display-layout quadrant swap
  and phase rescaling applied by the forward pass, and merges the two
  half-plane arrays into the complex buffer `fourier_pixels' that
  InverseFourierTransform() will feed to FFTW.
*/
static MagickBooleanType InverseFourier(FourierInfo *fourier_info,
  const Image *magnitude_image,const Image *phase_image,
  fftw_complex *fourier_pixels,ExceptionInfo *exception)
{
  CacheView
    *magnitude_view,
    *phase_view;

  double
    *inverse_pixels,
    *magnitude_pixels,
    *phase_pixels;

  MagickBooleanType
    status;

  MemoryInfo
    *inverse_info,
    *magnitude_info,
    *phase_info;

  register const IndexPacket
    *indexes;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Inverse fourier - read image and break down into a double array.
  */
  magnitude_info=AcquireVirtualMemory((size_t)fourier_info->height,
    fourier_info->width*sizeof(*magnitude_pixels));
  phase_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*phase_pixels));
  inverse_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->center*sizeof(*inverse_pixels));
  if ((magnitude_info == (MemoryInfo *) NULL) ||
      (phase_info == (MemoryInfo *) NULL) ||
      (inverse_info == (MemoryInfo *) NULL))
    {
      /* Release whichever allocations succeeded. */
      if (magnitude_info != (MemoryInfo *) NULL)
        magnitude_info=RelinquishVirtualMemory(magnitude_info);
      if (phase_info != (MemoryInfo *) NULL)
        phase_info=RelinquishVirtualMemory(phase_info);
      if (inverse_info != (MemoryInfo *) NULL)
        inverse_info=RelinquishVirtualMemory(inverse_info);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  magnitude_pixels=(double *) GetVirtualMemoryBlob(magnitude_info);
  phase_pixels=(double *) GetVirtualMemoryBlob(phase_info);
  inverse_pixels=(double *) GetVirtualMemoryBlob(inverse_info);
  /* Read the selected channel of the magnitude image, scaled to [0,1]. */
  i=0L;
  magnitude_view=AcquireVirtualCacheView(magnitude_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(magnitude_view,0L,y,fourier_info->width,1UL,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    /* NOTE(review): authentic index queue on a virtual view — the forward
       path uses GetCacheViewVirtualIndexQueue(); verify intent. */
    indexes=GetCacheViewAuthenticIndexQueue(magnitude_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          magnitude_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  magnitude_view=DestroyCacheView(magnitude_view);
  /* Fold the magnitude plane back to half-plane layout. */
  status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
    magnitude_pixels,inverse_pixels);
  (void) CopyMagickMemory(magnitude_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*magnitude_pixels));
  /* Read the selected channel of the phase image. */
  i=0L;
  phase_view=AcquireVirtualCacheView(phase_image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    p=GetCacheViewVirtualPixels(phase_view,0,y,fourier_info->width,1,
      exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(phase_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      switch (fourier_info->channel)
      {
        case RedChannel:
        default:
        {
          phase_pixels[i]=QuantumScale*GetPixelRed(p);
          break;
        }
        case GreenChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelGreen(p);
          break;
        }
        case BlueChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelBlue(p);
          break;
        }
        case OpacityChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelOpacity(p);
          break;
        }
        case IndexChannel:
        {
          phase_pixels[i]=QuantumScale*GetPixelIndex(indexes+x);
          break;
        }
        case GrayChannels:
        {
          phase_pixels[i]=QuantumScale*GetPixelGray(p);
          break;
        }
      }
      i++;
      p++;
    }
  }
  if (fourier_info->modulus != MagickFalse)
    {
      /* Undo the forward pass's [0,1] display scaling: back to [-pi,pi]. */
      i=0L;
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->width; x++)
        {
          phase_pixels[i]-=0.5;
          phase_pixels[i]*=(2.0*MagickPI);
          i++;
        }
    }
  phase_view=DestroyCacheView(phase_view);
  CorrectPhaseLHS(fourier_info->width,fourier_info->height,phase_pixels);
  if (status != MagickFalse)
    status=InverseQuadrantSwap(fourier_info->width,fourier_info->height,
      phase_pixels,inverse_pixels);
  (void) CopyMagickMemory(phase_pixels,inverse_pixels,fourier_info->height*
    fourier_info->center*sizeof(*phase_pixels));
  inverse_info=RelinquishVirtualMemory(inverse_info);
  /*
    Merge two sets.
  */
  i=0L;
  if (fourier_info->modulus != MagickFalse)
    /* Polar form: z = m*cos(phi) + i*m*sin(phi). */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]*cos(phase_pixels[i])+I*
          magnitude_pixels[i]*sin(phase_pixels[i]);
#else
        fourier_pixels[i][0]=magnitude_pixels[i]*cos(phase_pixels[i]);
        fourier_pixels[i][1]=magnitude_pixels[i]*sin(phase_pixels[i]);
#endif
        i++;
      }
  else
    /* Cartesian form: the two planes are already real and imaginary. */
    for (y=0L; y < (ssize_t) fourier_info->height; y++)
      for (x=0L; x < (ssize_t) fourier_info->center; x++)
      {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
        fourier_pixels[i]=magnitude_pixels[i]+I*phase_pixels[i];
#else
        fourier_pixels[i][0]=magnitude_pixels[i];
        fourier_pixels[i][1]=phase_pixels[i];
#endif
        i++;
      }
  magnitude_info=RelinquishVirtualMemory(magnitude_info);
  phase_info=RelinquishVirtualMemory(phase_info);
  return(status);
}
/*
  InverseFourierTransform() runs FFTW's complex-to-real inverse transform on
  `fourier_pixels' (optionally normalizing first, when the
  "fourier:normalize" artifact is "inverse") and writes the resulting real
  plane into the selected channel of `image', cropped to the image extent.
*/
static MagickBooleanType InverseFourierTransform(FourierInfo *fourier_info,
  fftw_complex *fourier_pixels,Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    *source_pixels;

  const char
    *value;

  fftw_plan
    fftw_c2r_plan;

  MemoryInfo
    *source_info;

  register IndexPacket
    *indexes;

  register PixelPacket
    *q;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  source_info=AcquireVirtualMemory((size_t) fourier_info->height,
    fourier_info->width*sizeof(*source_pixels));
  if (source_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  source_pixels=(double *) GetVirtualMemoryBlob(source_info);
  value=GetImageArtifact(image,"fourier:normalize");
  /* Fix: guard against a NULL artifact before comparing, matching the NULL
     check the forward path performs on the same artifact. */
  if ((value != (const char *) NULL) && (LocaleCompare(value,"inverse") == 0))
    {
      double
        gamma;

      /*
        Normalize Fourier transform.
      */
      i=0L;
      gamma=PerceptibleReciprocal((double) fourier_info->width*
        fourier_info->height);
      for (y=0L; y < (ssize_t) fourier_info->height; y++)
        for (x=0L; x < (ssize_t) fourier_info->center; x++)
        {
#if defined(MAGICKCORE_HAVE_COMPLEX_H)
          fourier_pixels[i]*=gamma;
#else
          fourier_pixels[i][0]*=gamma;
          fourier_pixels[i][1]*=gamma;
#endif
          i++;
        }
    }
  /* FFTW planner is not thread-safe; serialize plan creation. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_InverseFourierTransform)
#endif
  {
    fftw_c2r_plan=fftw_plan_dft_c2r_2d(fourier_info->width,fourier_info->height,
      fourier_pixels,source_pixels,FFTW_ESTIMATE);
    fftw_execute(fftw_c2r_plan);
    fftw_destroy_plan(fftw_c2r_plan);
  }
  /* Copy the real plane into the image, clipping to its actual geometry
     (the transform may have been padded to a larger square). */
  i=0L;
  image_view=AcquireAuthenticCacheView(image,exception);
  for (y=0L; y < (ssize_t) fourier_info->height; y++)
  {
    if (y >= (ssize_t) image->rows)
      break;
    q=GetCacheViewAuthenticPixels(image_view,0L,y,fourier_info->width >
      image->columns ? image->columns : fourier_info->width,1UL,exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0L; x < (ssize_t) fourier_info->width; x++)
    {
      if (x < (ssize_t) image->columns)
        switch (fourier_info->channel)
        {
          case RedChannel:
          default:
          {
            SetPixelRed(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case GreenChannel:
          {
            SetPixelGreen(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case BlueChannel:
          {
            SetPixelBlue(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case OpacityChannel:
          {
            SetPixelOpacity(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
          case IndexChannel:
          {
            SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange*
              source_pixels[i]));
            break;
          }
          case GrayChannels:
          {
            SetPixelGray(q,ClampToQuantum(QuantumRange*source_pixels[i]));
            break;
          }
        }
      i++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  source_info=RelinquishVirtualMemory(source_info);
  return(MagickTrue);
}
/*
  InverseFourierTransformChannel() reconstructs one channel of the spatial
  image from a magnitude/phase (or real/imaginary) image pair: it derives
  the square, even-sided transform geometry from the magnitude image,
  allocates the half-plane complex buffer, merges the pair with
  InverseFourier(), and inverse-transforms it into `fourier_image'.
*/
static MagickBooleanType InverseFourierTransformChannel(
  const Image *magnitude_image,const Image *phase_image,
  const ChannelType channel,const MagickBooleanType modulus,
  Image *fourier_image,ExceptionInfo *exception)
{
  fftw_complex
    *complex_pixels;

  FourierInfo
    fourier_info;

  MagickBooleanType
    status;

  MemoryInfo
    *complex_info;

  size_t
    extent;

  /* Same padding rule as the forward pass: an even-sided square. */
  extent=magnitude_image->columns;
  if ((magnitude_image->columns != magnitude_image->rows) ||
      ((magnitude_image->columns % 2) != 0) ||
      ((magnitude_image->rows % 2) != 0))
    {
      extent=magnitude_image->columns < magnitude_image->rows ?
        magnitude_image->rows : magnitude_image->columns;
      if ((extent & 0x01) == 1)
        extent++;
    }
  fourier_info.width=extent;
  fourier_info.height=extent;
  fourier_info.center=(ssize_t) floor((double) extent/2L)+1L;
  fourier_info.channel=channel;
  fourier_info.modulus=modulus;
  complex_info=AcquireVirtualMemory((size_t) fourier_info.height,
    fourier_info.center*sizeof(*complex_pixels));
  if (complex_info == (MemoryInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        magnitude_image->filename);
      return(MagickFalse);
    }
  complex_pixels=(fftw_complex *) GetVirtualMemoryBlob(complex_info);
  status=InverseFourier(&fourier_info,magnitude_image,phase_image,
    complex_pixels,exception);
  if (status != MagickFalse)
    status=InverseFourierTransform(&fourier_info,complex_pixels,fourier_image,
      exception);
  complex_info=RelinquishVirtualMemory(complex_info);
  return(status);
}
#endif
/*
  InverseFourierTransformImage() combines a magnitude/phase (or
  real/imaginary) image pair into a single spatial-domain image, or returns
  NULL on failure.  Without FFTW support it only raises a missing-delegate
  warning and returns NULL.
*/
MagickExport Image *InverseFourierTransformImage(const Image *magnitude_image,
  const Image *phase_image,const MagickBooleanType modulus,
  ExceptionInfo *exception)
{
  Image
    *fourier_image;

  assert(magnitude_image != (Image *) NULL);
  assert(magnitude_image->signature == MagickSignature);
  if (magnitude_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      magnitude_image->filename);
  /* Both halves of the pair are required. */
  if (phase_image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ImageSequenceRequired","`%s'",magnitude_image->filename);
      return((Image *) NULL);
    }
#if !defined(MAGICKCORE_FFTW_DELEGATE)
  fourier_image=(Image *) NULL;
  (void) modulus;
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","`%s' (FFTW)",
    magnitude_image->filename);
#else
  {
    fourier_image=CloneImage(magnitude_image,magnitude_image->columns,
      magnitude_image->rows,MagickTrue,exception);
    if (fourier_image != (Image *) NULL)
      {
        MagickBooleanType
          is_gray,
          status;

        status=MagickTrue;
        /* Only treat the pair as gray if BOTH images are gray. */
        is_gray=IsGrayImage(magnitude_image,exception);
        if (is_gray != MagickFalse)
          is_gray=IsGrayImage(phase_image,exception);
        /* Transform each channel in its own OpenMP section. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp parallel sections
#endif
        {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            if (is_gray != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GrayChannels,modulus,fourier_image,exception);
            else
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,RedChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,GreenChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (is_gray == MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,BlueChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->matte != MagickFalse)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,OpacityChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
          #pragma omp section
#endif
          {
            MagickBooleanType
              thread_status;

            thread_status=MagickTrue;
            if (magnitude_image->colorspace == CMYKColorspace)
              thread_status=InverseFourierTransformChannel(magnitude_image,
                phase_image,IndexChannel,modulus,fourier_image,exception);
            if (thread_status == MagickFalse)
              status=thread_status;
          }
        }
        /* Any failed channel invalidates the whole result. */
        if (status == MagickFalse)
          fourier_image=DestroyImage(fourier_image);
      }
    fftw_cleanup();
  }
#endif
  return(fourier_image);
}
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval' values.
 *
 * Note: *y is normalized in place (its fields may change) so that the
 * microsecond difference lands in a representable range.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  int carry;

  /* Borrow whole seconds into y's microseconds if x has fewer usecs. */
  if (x->tv_usec < y->tv_usec)
    {
      carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * carry;
      y->tv_sec += carry;
    }
  /* Push excess microseconds (beyond one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* After normalization tv_usec is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 4;
tile_size[3] = 32;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,8);t1++) {
lbp=max(ceild(t1,2),ceild(16*t1-Nt+3,16));
ubp=min(floord(Nt+Nz-4,16),floord(8*t1+Nz+5,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(16*t2-Nz,4)),2*t1);t3<=min(min(min(floord(Nt+Ny-4,4),floord(8*t1+Ny+13,4)),floord(16*t2+Ny+12,4)),floord(16*t1-16*t2+Nz+Ny+11,4));t3++) {
for (t4=max(max(max(0,ceild(t1-3,4)),ceild(16*t2-Nz-28,32)),ceild(4*t3-Ny-28,32));t4<=min(min(min(min(floord(4*t3+Nx,32),floord(Nt+Nx-4,32)),floord(8*t1+Nx+13,32)),floord(16*t2+Nx+12,32)),floord(16*t1-16*t2+Nz+Nx+11,32));t4++) {
for (t5=max(max(max(max(max(0,8*t1),16*t1-16*t2+1),16*t2-Nz+2),4*t3-Ny+2),32*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,8*t1+15),16*t2+14),4*t3+2),32*t4+30),16*t1-16*t2+Nz+13);t5++) {
for (t6=max(max(16*t2,t5+1),-16*t1+16*t2+2*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
lbv=max(32*t4,t5+1);
ubv=min(32*t4+31,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
DataGen.h | // Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License
#pragma once
#include <boost/algorithm/string/predicate.hpp>
#include <cstring>
#include <memory>
#include <random>
#include "google/protobuf/text_format.h"
#include <knowhere/index/vector_index/VecIndex.h>
#include <knowhere/index/vector_index/adapter/VectorAdapter.h>
#include <knowhere/index/vector_index/VecIndexFactory.h>
#include <knowhere/index/vector_index/IndexIVF.h>
#include "Constants.h"
#include "common/Schema.h"
#include "query/SearchOnIndex.h"
#include "segcore/SegmentGrowingImpl.h"
#include "segcore/SegmentSealedImpl.h"
#include "segcore/Utils.h"
#include "index/ScalarIndexSort.h"
#include "index/StringIndexSort.h"
using boost::algorithm::starts_with;
namespace milvus::segcore {
// Container for one synthetically generated insert batch: row ids,
// timestamps, the raw protobuf InsertData and the schema it was generated
// from. Instances are produced only by DataGen() (constructor is private).
struct GeneratedData {
    std::vector<idx_t> row_ids_;
    std::vector<Timestamp> timestamps_;
    InsertData* raw_;  // released from DataGen's unique_ptr; owned by this struct
    std::vector<FieldId> field_ids;
    SchemaPtr schema_;

    // Extracts column `field_id` as a flat std::vector<T>.
    // For vector fields T must match the element type (float / uint8_t and the
    // result has num_rows * dim (or dim/8) entries; for scalar fields the
    // stored representation is converted element-wise where it differs from T
    // (INT8/16/32 are stored as int32 in the proto).
    template <typename T>
    std::vector<T>
    get_col(FieldId field_id) const {
        std::vector<T> ret(raw_->num_rows());
        for (const auto& target_field_data : raw_->fields_data()) {
            if (field_id.get() != target_field_data.field_id()) {
                continue;
            }
            auto& field_meta = schema_->operator[](field_id);
            if (field_meta.is_vector()) {
                if (field_meta.get_data_type() == DataType::VECTOR_FLOAT) {
                    int len = raw_->num_rows() * field_meta.get_dim();
                    ret.resize(len);
                    auto src_data =
                        reinterpret_cast<const T*>(target_field_data.vectors().float_vector().data().data());
                    std::copy_n(src_data, len, ret.data());
                } else if (field_meta.get_data_type() == DataType::VECTOR_BINARY) {
                    int len = raw_->num_rows() * (field_meta.get_dim() / 8);
                    ret.resize(len);
                    auto src_data = reinterpret_cast<const T*>(target_field_data.vectors().binary_vector().data());
                    std::copy_n(src_data, len, ret.data());
                } else {
                    PanicInfo("unsupported");
                }
                // Fix: `return ret` enables NRVO; the previous
                // `return std::move(ret)` was a pessimizing move that
                // disables copy elision.
                return ret;
            }
            switch (field_meta.get_data_type()) {
                case DataType::BOOL: {
                    auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().bool_data().data().data());
                    std::copy_n(src_data, raw_->num_rows(), ret.data());
                    break;
                }
                case DataType::INT8:
                case DataType::INT16:
                case DataType::INT32: {
                    // Narrow ints are stored as int32 in the proto; copy_n
                    // performs the element-wise conversion to T.
                    auto src_data =
                        reinterpret_cast<const int32_t*>(target_field_data.scalars().int_data().data().data());
                    std::copy_n(src_data, raw_->num_rows(), ret.data());
                    break;
                }
                case DataType::INT64: {
                    auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().long_data().data().data());
                    std::copy_n(src_data, raw_->num_rows(), ret.data());
                    break;
                }
                case DataType::FLOAT: {
                    auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().float_data().data().data());
                    std::copy_n(src_data, raw_->num_rows(), ret.data());
                    break;
                }
                case DataType::DOUBLE: {
                    auto src_data = reinterpret_cast<const T*>(target_field_data.scalars().double_data().data().data());
                    std::copy_n(src_data, raw_->num_rows(), ret.data());
                    break;
                }
                case DataType::VARCHAR: {
                    auto ret_data = reinterpret_cast<std::string*>(ret.data());
                    auto src_data = target_field_data.scalars().string_data().data();
                    std::copy(src_data.begin(), src_data.end(), ret_data);
                    break;
                }
                default: {
                    PanicInfo("unsupported");
                }
            }
        }
        // Same pessimizing-move fix as above.
        return ret;
    }

    // Returns a copy of the raw DataArray for `field_id`; panics when the
    // field is not present in the generated batch.
    std::unique_ptr<DataArray>
    get_col(FieldId field_id) const {
        for (const auto& target_field_data : raw_->fields_data()) {
            if (field_id.get() == target_field_data.field_id()) {
                return std::make_unique<DataArray>(target_field_data);
            }
        }
        PanicInfo("field id not find");
    }

 private:
    GeneratedData() = default;
    friend GeneratedData
    DataGen(SchemaPtr schema, int64_t N, uint64_t seed, uint64_t ts_offset, int repeat_count);
};
// Generates a deterministic test batch of N rows for every field in `schema`.
// seed        -- seeds all random draws (default 42), so output is reproducible.
// ts_offset   -- added to each row index to form the timestamp.
// repeat_count-- INT64 pk and VARCHAR values repeat in runs of this length.
// Vector fields whose name starts with "normalized" are L2-normalized.
inline GeneratedData
DataGen(SchemaPtr schema, int64_t N, uint64_t seed = 42, uint64_t ts_offset = 0, int repeat_count = 1) {
    using std::vector;
    std::default_random_engine er(seed);
    std::normal_distribution<> distr(0, 1);
    int offset = 0;  // incremented per field; shifts the float-vector mean
    auto insert_data = std::make_unique<InsertData>();
    // Wraps a generated column into a DataArray and hands ownership to insert_data.
    auto insert_cols = [&insert_data](auto& data, int64_t count, auto& field_meta) {
        auto array = milvus::segcore::CreateDataArrayFrom(data.data(), count, field_meta);
        insert_data->mutable_fields_data()->AddAllocated(array.release());
    };
    for (auto field_id : schema->get_field_ids()) {
        auto field_meta = schema->operator[](field_id);
        switch (field_meta.get_data_type()) {
            case DataType::VECTOR_FLOAT: {
                auto dim = field_meta.get_dim();
                vector<float> final(dim * N);
                bool is_ip = starts_with(field_meta.get_name().get(), "normalized");
// Each row reseeds its own engine with (seed + n) so the result is
// independent of the OpenMP schedule.
#pragma omp parallel for
                for (int n = 0; n < N; ++n) {
                    vector<float> data(dim);
                    float sum = 0;
                    std::default_random_engine er2(seed + n);
                    std::normal_distribution<> distr2(0, 1);
                    for (auto& x : data) {
                        x = distr2(er2) + offset;
                        sum += x * x;
                    }
                    if (is_ip) {
                        // L2-normalize so inner product equals cosine similarity.
                        sum = sqrt(sum);
                        for (auto& x : data) {
                            x /= sum;
                        }
                    }
                    std::copy(data.begin(), data.end(), final.begin() + dim * n);
                }
                insert_cols(final, N, field_meta);
                break;
            }
            case DataType::VECTOR_BINARY: {
                auto dim = field_meta.get_dim();
                Assert(dim % 8 == 0);
                vector<uint8_t> data(dim / 8 * N);
                for (auto& x : data) {
                    x = er();
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::INT64: {
                vector<int64_t> data(N);
                // Monotonic values repeating `repeat_count` times each
                // (serves as a primary key with controllable duplication).
                for (int i = 0; i < N; i++) {
                    data[i] = i / repeat_count;
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::INT32: {
                vector<int> data(N);
                for (auto& x : data) {
                    x = er() % (2 * N);
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::INT16: {
                vector<int16_t> data(N);
                for (auto& x : data) {
                    x = er() % (2 * N);
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::INT8: {
                vector<int8_t> data(N);
                for (auto& x : data) {
                    x = er() % (2 * N);
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::FLOAT: {
                vector<float> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::DOUBLE: {
                vector<double> data(N);
                for (auto& x : data) {
                    x = distr(er);
                }
                insert_cols(data, N, field_meta);
                break;
            }
            case DataType::VARCHAR: {
                vector<std::string> data(N);
                // NOTE(review): when N is not divisible by repeat_count the
                // last N % repeat_count entries stay empty strings -- confirm
                // this is intended by callers.
                for (int i = 0; i < N / repeat_count; i++) {
                    auto str = std::to_string(er());
                    for (int j = 0; j < repeat_count; j++) {
                        data[i * repeat_count + j] = str;
                    }
                }
                insert_cols(data, N, field_meta);
                break;
            }
            default: {
                throw std::runtime_error("unimplemented");
            }
        }
        ++offset;
    }
    GeneratedData res;
    res.schema_ = schema;
    res.raw_ = insert_data.release();  // ownership transferred to res
    res.raw_->set_num_rows(N);
    for (int i = 0; i < N; ++i) {
        res.row_ids_.push_back(i);
        res.timestamps_.push_back(i + ts_offset);
    }
    return res;
}
inline auto
CreatePlaceholderGroup(int64_t num_queries, int dim, int64_t seed = 42) {
namespace ser = milvus::proto::common;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
std::normal_distribution<double> dis(0, 1);
std::default_random_engine e(seed);
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(dis(e));
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
inline auto
CreatePlaceholderGroupFromBlob(int64_t num_queries, int dim, const float* src) {
namespace ser = milvus::proto::common;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::FloatVector);
int64_t src_index = 0;
for (int i = 0; i < num_queries; ++i) {
std::vector<float> vec;
for (int d = 0; d < dim; ++d) {
vec.push_back(src[src_index++]);
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size() * sizeof(float));
}
return raw_group;
}
// Builds a PlaceholderGroup with tag "$0" holding `num_queries` random binary
// vectors of `dim` bits (dim must be a multiple of 8); each byte packs 8 bits.
inline auto
CreateBinaryPlaceholderGroup(int64_t num_queries, int64_t dim, int64_t seed = 42) {
    assert(dim % 8 == 0);
    namespace ser = milvus::proto::common;
    ser::PlaceholderGroup raw_group;
    auto value = raw_group.add_placeholders();
    value->set_tag("$0");
    value->set_type(ser::PlaceholderType::BinaryVector);
    std::default_random_engine e(seed);
    for (int i = 0; i < num_queries; ++i) {
        std::vector<uint8_t> vec;
        // dim/8 random bytes per query (engine output truncated to 8 bits).
        for (int d = 0; d < dim / 8; ++d) {
            vec.push_back(e());
        }
        // std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
        value->add_values(vec.data(), vec.size());
    }
    return raw_group;
}
inline auto
CreateBinaryPlaceholderGroupFromBlob(int64_t num_queries, int64_t dim, const uint8_t* ptr) {
assert(dim % 8 == 0);
namespace ser = milvus::proto::common;
ser::PlaceholderGroup raw_group;
auto value = raw_group.add_placeholders();
value->set_tag("$0");
value->set_type(ser::PlaceholderType::BinaryVector);
for (int i = 0; i < num_queries; ++i) {
std::vector<uint8_t> vec;
for (int d = 0; d < dim / 8; ++d) {
vec.push_back(*ptr);
++ptr;
}
// std::string line((char*)vec.data(), (char*)vec.data() + vec.size() * sizeof(float));
value->add_values(vec.data(), vec.size());
}
return raw_group;
}
// Renders a SearchResult as JSON: one list per query, each hit formatted as
// "<seg_offset>-><distance>" for the top-k results.
inline json
SearchResultToJson(const SearchResult& sr) {
    const int64_t nq = sr.total_nq_;
    const int64_t topk = sr.unity_topK_;
    std::vector<std::vector<std::string>> rows;
    rows.reserve(nq);
    for (int64_t q = 0; q < nq; ++q) {
        std::vector<std::string> row;
        row.reserve(topk);
        for (int64_t k = 0; k < topk; ++k) {
            const int64_t idx = q * topk + k;
            row.push_back(std::to_string(sr.seg_offsets_[idx]) + "->" + std::to_string(sr.distances_[idx]));
        }
        rows.emplace_back(std::move(row));
    }
    return json{rows};
}
// Loads a GeneratedData batch into a sealed segment: the system RowID and
// Timestamp columns first, then every user field from the raw InsertData.
inline void
SealedLoadFieldData(const GeneratedData& dataset, SegmentSealed& seg) {
    auto row_count = dataset.row_ids_.size();
    {
        LoadFieldDataInfo info;
        FieldMeta field_meta(FieldName("RowID"), RowFieldID, DataType::INT64);
        auto array = CreateScalarDataArrayFrom(dataset.row_ids_.data(), row_count, field_meta);
        // NOTE(review): array.release() hands a raw pointer to info.field_data;
        // this leaks unless seg.LoadFieldData takes ownership -- confirm.
        info.field_data = array.release();
        info.row_count = dataset.row_ids_.size();
        info.field_id = RowFieldID.get();  // field id for RowId
        seg.LoadFieldData(info);
    }
    {
        LoadFieldDataInfo info;
        FieldMeta field_meta(FieldName("Timestamp"), TimestampFieldID, DataType::INT64);
        auto array = CreateScalarDataArrayFrom(dataset.timestamps_.data(), row_count, field_meta);
        info.field_data = array.release();
        info.row_count = dataset.timestamps_.size();
        info.field_id = TimestampFieldID.get();
        seg.LoadFieldData(info);
    }
    // `field_data` is a per-iteration copy; &field_data is only valid during
    // the LoadFieldData call below -- assumes the segment copies the data
    // synchronously (TODO confirm).
    for (auto field_data : dataset.raw_->fields_data()) {
        LoadFieldDataInfo info;
        info.field_id = field_data.field_id();
        info.row_count = row_count;
        info.field_data = &field_data;
        seg.LoadFieldData(info);
    }
}
// Convenience factory: builds a sealed segment for `schema` and loads every
// field of `dataset` into it before returning.
inline std::unique_ptr<SegmentSealed>
SealedCreator(SchemaPtr schema, const GeneratedData& dataset) {
    auto sealed = CreateSealedSegment(schema);
    SealedLoadFieldData(dataset, *sealed);
    return sealed;
}
// Builds a CPU IVF knowhere index over N float vectors of dimension `dim`
// (nlist = 1024, L2 metric), training on and then adding the same dataset.
// Vectors are added without explicit ids (AddWithoutIds).
inline knowhere::VecIndexPtr
GenVecIndexing(int64_t N, int64_t dim, const float* vec) {
    // {knowhere::IndexParams::nprobe, 10},
    auto conf = knowhere::Config{{knowhere::meta::DIM, dim},
                                 {knowhere::IndexParams::nlist, 1024},
                                 {knowhere::Metric::TYPE, knowhere::Metric::L2},
                                 {knowhere::meta::DEVICEID, 0}};
    auto database = knowhere::GenDataset(N, dim, vec);
    auto indexing = std::make_shared<knowhere::IVF>();
    indexing->Train(database, conf);
    indexing->AddWithoutIds(database, conf);
    return indexing;
}
// Builds a sorted scalar index over N values; dispatches at compile time to
// the string-specific index when T is std::string.
template <typename T>
inline scalar::IndexBasePtr
GenScalarIndexing(int64_t N, const T* data) {
    if constexpr (std::is_same_v<T, std::string>) {
        auto index = scalar::CreateStringIndexSort();
        index->Build(N, data);
        return index;
    } else {
        auto index = scalar::CreateScalarIndexSort<T>();
        index->Build(N, data);
        return index;
    }
}
// Parses a text-format plan protobuf and re-serializes it to the binary wire
// format, returned as a byte vector. Asserts on parse or serialize failure.
inline std::vector<char>
translate_text_plan_to_binary_plan(const char* text_plan) {
    proto::plan::PlanNode plan_node;
    auto ok = google::protobuf::TextFormat::ParseFromString(text_plan, &plan_node);
    AssertInfo(ok, "Failed to parse");
    std::string binary_plan;
    // Bug fix: the serialize result was previously ignored; a failed
    // serialization would silently return a truncated/empty plan.
    ok = plan_node.SerializeToString(&binary_plan);
    AssertInfo(ok, "Failed to serialize plan");
    // Range construction replaces the manual resize + memcpy.
    return std::vector<char>(binary_plan.begin(), binary_plan.end());
}
} // namespace milvus::segcore
|
20_omp_priv_combi_nested.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S 2>&1 | %filecheck %s --check-prefix=check-opt
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -typeart-filter-pointer-alloca=false -S | %filecheck %s --check-prefix=check-opt-inst
// REQUIRES: openmp
// clang-format on
#include "omp.h"
// NOTE: with opt, the compiler passes the address until the MPI_Send, hence
// only the initial allocation is tracked.
extern void MPI_Send(void*, int);
// Hands both pointer args to MPI_Send inside an OpenMP worksharing loop so
// the TypeArt call filter must keep tracking x (first/lastprivate) while e
// stays shared. The comment lines below are FileCheck directives.
void func(int* x, int* e) {
  // check-inst: define {{.*}} @func
  // check-inst-NOT: call void @__typeart_alloc_stack
  // check-opt-inst: define {{.*}} @func
  // check-opt-inst-NOT: call void @__typeart_alloc_stack
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
  // check-opt-inst: define {{.*}} @.omp_outlined
  // check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
  for (int i = 0; i < 10; ++i) {
    // Analysis should not filter x, but e...
    MPI_Send((void*)x, *e);
  }
}
// x escapes into func through the nested parallel region, so its stack
// allocation must be instrumented; the checks pin that exactly one of the
// two locals is tracked.
void foo() {
  // check-inst: define {{.*}} @foo
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  // check-opt-inst: define {{.*}} @foo
  // check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  int x = 1;
  int y = 2;
#pragma omp parallel
  { func(&x, &y); }
}
// Variant of func with an extra MPI_Send outside the parallel region; the
// FileCheck directives below assert the same instrumentation pattern.
void func_other(int* x, int* e) {
  // check-inst: define {{.*}} @func_other
  // check-inst-NOT: call void @__typeart_alloc_stack
  // check-opt-inst: define {{.*}} @func_other
  // check-opt-inst-NOT: call void @__typeart_alloc_stack
  // check-inst: define {{.*}} @.omp_outlined
  // check-inst: call void @__typeart_alloc_stack_omp(i8* %0, i32 10, i64 1)
  // check-opt-inst: define {{.*}} @.omp_outlined
  // check-opt-inst-NOT: call void @__typeart_alloc_stack_omp
#pragma omp parallel for firstprivate(x), lastprivate(x), shared(e)
  for (int i = 0; i < 10; ++i) {
    // Analysis should not filter x, but e...
    MPI_Send(x, *e);
  }
  MPI_Send(x, *e);
}
// Same shape as foo but calling func_other; x escapes via the parallel
// region and must be tracked.
void bar(int x_other) {
  // check-inst: define {{.*}} @bar
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  // check-opt-inst: define {{.*}} @bar
  // check-opt-inst: call void @__typeart_alloc_stack(i8* %0, i32 2, i64 1)
  int x = x_other;
  int y = 2;
#pragma omp parallel
  { func_other(&x, &y); }
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK-NEXT: Malloc : 0
// CHECK-NEXT: Free : 0
// CHECK-NEXT: Alloca : 4
// CHECK-NEXT: Global : 0
// check-opt: TypeArtPass [Heap & Stack]
// check-opt: Malloc : 0
// check-opt: Free : 0
// check-opt: Alloca : 2
// check-opt: Global : 0 |
omp_app.c | /* for affinity functions */
#define _GNU_SOURCE
#include <sched.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <omp.h>
#include <stdio.h>
void debug_affinity(void);
// Prints basic OpenMP runtime facts and the process CPU affinity mask, then
// opens a parallel region in which every thread announces itself.
int main(int argc, char **argv) {
  printf("omp_num_procs: %d (available cpus)\n", omp_get_num_procs());
  printf("omp_max_threads: %d (allowed threads)\n", omp_get_max_threads());
  // Outside a parallel region this reports 1.
  printf("omp_num_threads: %d (threads in current block)\n", omp_get_num_threads());
  printf("omp_thread_num: %d (id of main thread)\n", omp_get_thread_num());
  debug_affinity();
  // NOTE(review): the "x/y" denominator is omp_get_num_procs(), not the
  // number of threads in the region -- presumably intentional for this demo.
#pragma omp parallel
  printf("%d/%d thread ready\n", omp_get_thread_num(), omp_get_num_procs());
  return 0;
}
/* Prints the calling process's CPU affinity mask as a count plus a
 * comma-separated list of CPU numbers. */
void debug_affinity(void) {
  cpu_set_t *cs;
  int count, size, i, first;

  cs = CPU_ALLOC(CPU_SETSIZE);
  assert(cs != NULL);
  size = CPU_ALLOC_SIZE(CPU_SETSIZE);
  CPU_ZERO_S(size, cs);
  /* Bug fix: the return value was ignored; on failure the zeroed set was
   * silently printed as "0 count" affinity. */
  if (sched_getaffinity(0, (size_t)size, cs) != 0) {
    perror("sched_getaffinity");
    CPU_FREE(cs);
    return;
  }
  /* Use the _S macro variants throughout: `cs` is dynamically allocated, so
   * the size-aware forms are the ones documented for it (the plain forms
   * only happened to work because CPU_SETSIZE matches sizeof(cpu_set_t)). */
  count = CPU_COUNT_S(size, cs);
  first = 1;
  printf("cpu affinity (%d count): ", count);
  for (i = 0; i < CPU_SETSIZE; ++i) {
    if (CPU_ISSET_S(i, size, cs)) {
      if (!first)
        printf(",");
      printf("%d", i);
      first = 0;
    }
  }
  printf("\n");
  CPU_FREE(cs);
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Computes RESULT = X - Y. Note: Y is normalised IN PLACE so the callers'
 * Y value is modified (the classic GNU libc example idiom). */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
  {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive.
   */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1, 3D 7-point stencil with variable coefficients.
 *
 * Usage: prog Nx Ny Nz Nt
 * Spatial extents are padded by 2 for the boundary layer. Runs the
 * time-tiled (PLUTO/CLooG-generated) sweep TESTS times and reports the
 * fastest run via PRINT_RESULTS.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Bug fix: the original read Nx/Ny/Nz/Nt uninitialized (undefined
   * behavior) when fewer than four arguments were supplied. */
  if (argc <= 4) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the arrays: two time planes + 7 coefficient grids
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 4;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* Bug fix: the sweep reads neighbor cells on the boundary planes of both
   * time buffers, but the original only filled A[0] from index 1 upward and
   * never touched A[1], so it read uninitialized memory (UB). Zero both
   * buffers (and the coefficient grids) fully first, then fill with the
   * same rand() sequence as before so the interior data is unchanged. */
  for (i = 0; i < Nz; i++)
    for (j = 0; j < Ny; j++)
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 0.0;
        A[1][i][j][k] = 0.0;
        for (m = 0; m < 7; m++)
          coef[m][i][j][k] = 0.0;
      }
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,2);t1++) {
    lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4));
    ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(4*t2-Nz,4));t3<=min(min(min(floord(4*t2+Ny,4),floord(Nt+Ny-4,4)),floord(2*t1+Ny+1,4)),floord(4*t1-4*t2+Nz+Ny-1,4));t3++) {
        for (t4=max(max(max(0,ceild(t1-31,32)),ceild(4*t2-Nz-60,64)),ceild(4*t3-Ny-60,64));t4<=min(min(min(min(floord(4*t2+Nx,64),floord(4*t3+Nx,64)),floord(Nt+Nx-4,64)),floord(2*t1+Nx+1,64)),floord(4*t1-4*t2+Nz+Nx-1,64));t4++) {
          for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),4*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),4*t3+2),64*t4+62),4*t1-4*t2+Nz+1);t5++) {
            for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(4*t3,t5+1);t7<=min(4*t3+3,t5+Ny-2);t7++) {
                lbv=max(64*t4,t5+1);
                ubv=min(64*t4+63,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* bug fix: top-level pointer was leaked */
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);       /* bug fix: top-level pointer was leaked */
  free(tile_size);  /* bug fix: was leaked */
  return 0;
}
|
private-clause.c | /*
* private-clause.c
*
* Created on: 02/04/2014
* Author: Carlos de la Torre
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Teaching example for the OpenMP `private` clause: each thread sums the
// array into its own private copy of `suma`; those copies are discarded
// when the region ends.
int main() {
  int i, n = 7;
  int a[n], suma=0;  // a = {0..6}; outer suma stays 0 throughout
  for (i = 0; i < n; i++)
    a[i] = i;
  // Each thread gets its own (initially indeterminate) private `suma`,
  // which it zeroes before accumulating its share of the iterations.
#pragma omp parallel private(suma)
  {
    suma = 0;
    // `i` is made private automatically by the worksharing construct.
#pragma omp for
    for (i = 0; i < n; i++) {
      suma += a[i];
      printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
    }
    printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
  }
  // The outer `suma` is never written inside the region (the private copies
  // shadow it), so this always prints 0 -- which is the point of the demo.
  printf("\nSuma final: %d\n",suma);
  return 0;
}
|
a.35.6.c | /* { dg-do compile } */
// Deliberately NON-CONFORMING example (OpenMP spec example A.35.6): a
// barrier region may not be closely nested inside a single region. This is
// a compile-only test ({ dg-do compile }); `work` is intentionally left
// undeclared.
void
wrong6 (int n)
{
#pragma omp parallel
  {
#pragma omp single
    {
      work (n, 0);
      /* incorrect nesting of barrier region in a single region */
#pragma omp barrier
      work (n, 1);
    }
  }
}
|
broadcast_reduce_customized-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015-2017 by Contributors
* \file broadcast_reduce_customized-inl.h
* \brief CPU-specific Function definition of broadcast and reduce operators
*/
#ifndef MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#define MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
#include "../../tensor/broadcast_reduce-inl.h"
namespace mxnet {
namespace op {
namespace broadcast {
using namespace mshadow;
using mxnet_op::unravel;
using mxnet_op::ravel;
using mxnet_op::dot;
using mxnet_op::unravel_dot;
// Reduces one output element of `small`: folds the M entries of the
// reduction domain (shape `rshape`, strides `rstride`) of `big` into
// small[idx] via the caller-supplied reducer INSTANCE -- unlike the stock
// broadcast_reduce version, the reducer is passed as an object ("with
// reducer", _wr) so it can carry state.
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto,
                                          const DType* __restrict big, OType *small,
                                          const Shape<ndim>& bshape, const Shape<ndim>& sshape,
                                          const Shape<ndim>& rshape, const Shape<ndim>& rstride,
                                          Reducer* reducer) {
  Shape<ndim> coord = unravel(idx, sshape);  // output coordinate
  index_t j = ravel(coord, bshape);          // base offset of that coordinate in `big`
  AType val, residual;                       // residual: auxiliary accumulator state used by the reducer
  reducer->SetInitValue(val, residual);
  for (size_t k = 0; k < M; ++k) {
    coord = unravel(k, rshape);
    // OP::Map transforms each input before it is folded in.
    reducer->Reduce(val, AType(OP::Map(big[j + dot(coord, rstride)])), residual);
  }
  reducer->Finalize(val, residual);
  // `addto` selects accumulate-into vs overwrite semantics for the output.
  assign(&small[idx], addto, OType(val));
}
#ifdef __CUDACC__
#include "broadcast_reduce_customized-inl.cuh"
#include "../../tensor/broadcast_reduce-inl.cuh"
#else
template<typename Reducer, int ndim, typename AType, typename DType, typename OType, typename OP>
void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto,
const DType *big, OType *small, const Shape<ndim> bshape,
const Shape<ndim> sshape, const Shape<ndim> rshape,
const Shape<ndim> rstride,
Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign_wr<Reducer, ndim, AType, DType, OType, OP>(idx, M, addto, big, small,
bshape, sshape, rshape, rstride, reducer);
}
}
// Reduce `big` into `small` on CPU using a caller-supplied, stateful reducer
// instance (the "_wr" / WithReducer family).  `req` controls output
// semantics: kNullOp skips the work entirely, kAddTo accumulates into the
// existing contents of `small`.  When `safe_acc` is true, accumulation runs
// in a wider type selected by MXNET_ACC_TYPE_SWITCH to reduce precision
// loss, and the output type is switched independently of the input type.
template <typename Reducer, int ndim, typename DType, typename OP, bool safe_acc = false>
void ReduceWithReducer(Stream<cpu>* s, const TBlob& small, const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace, const TBlob& big,
                       Reducer* reducer) {
  if (req == kNullOp) return;
  // rshape/rstride describe the reduction window of `big` relative to the
  // shape of `small` (computed by the shared diff() helper).
  Shape<ndim> rshape, rstride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  // N = number of output elements, M = size of each reduction window.
  size_t N = small.shape_.Size(), M = rshape.Size();
  if (!safe_acc) {
    // Fast path: accumulate directly in the data type DType.
    seq_reduce_compute_wr<Reducer, ndim, DType, DType, DType, OP>(
      N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer);
  } else {
    // Safe-accumulation path: AType is the (possibly wider) accumulator type,
    // OType the actual output storage type of `small`.
    MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
      typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
      MSHADOW_TYPE_SWITCH_WITH_BOOL(small.type_flag_, OType, {
        typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
        seq_reduce_compute_wr<Reducer, ndim, AccType, DataType, OutType, OP>(
          N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
          big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride, reducer);
      });
    });
  }
}
// Reduce one output element of C = Reducer(OP1(big, OP2(lhs, rhs))): walk the
// shared reduction window once, fetching the matching element from each of
// the three inputs through its own shape/stride pair.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
MSHADOW_XINLINE void seq_reduce_assign_wr(const index_t idx, const size_t M, const bool addto,
                                          const DType* __restrict big, const DType* __restrict lhs,
                                          const DType* __restrict rhs, DType *small,
                                          const Shape<ndim>& big_shape,
                                          const Shape<ndim>& lhs_shape0,
                                          const Shape<ndim>& rhs_shape0,
                                          const Shape<ndim>& small_shape, const Shape<ndim>& rshape,
                                          const Shape<ndim>& lhs_shape,
                                          const Shape<ndim>& rhs_shape,
                                          const Shape<ndim>& rstride, const Shape<ndim>& lhs_stride,
                                          const Shape<ndim>& rhs_stride,
                                          Reducer* reducer) {
  // Base offsets of this output element inside each of the three inputs.
  const Shape<ndim> out_coord = unravel(idx, small_shape);
  const index_t big_base = ravel(out_coord, big_shape);
  const index_t lhs_base = ravel(out_coord, lhs_shape0);
  const index_t rhs_base = ravel(out_coord, rhs_shape0);
  DType acc, resid;
  reducer->SetInitValue(acc, resid);
  for (size_t k = 0; k < M; ++k) {
    const index_t bi = big_base + dot(unravel(k, rshape), rstride);
    const index_t li = lhs_base + dot(unravel(k, lhs_shape), lhs_stride);
    const index_t ri = rhs_base + dot(unravel(k, rhs_shape), rhs_stride);
    reducer->Reduce(acc, OP1::Map(big[bi], OP2::Map(lhs[li], rhs[ri])), resid);
  }
  reducer->Finalize(acc, resid);
  assign(&small[idx], addto, acc);
}
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void seq_reduce_compute_wr(const size_t N, const size_t M, const bool addto,
const DType *big, const DType *lhs, const DType *rhs, DType *small,
const Shape<ndim> big_shape, const Shape<ndim> small_shape,
const Shape<ndim> rshape, const Shape<ndim> rstride,
const Shape<ndim> lhs_shape, const Shape<ndim> lhs_stride,
const Shape<ndim> rhs_shape, const Shape<ndim> rhs_stride,
const Shape<ndim>& lhs_shape0, const Shape<ndim>& rhs_shape0,
Reducer* reducer) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t idx = 0; idx < static_cast<index_t>(N); ++idx) {
seq_reduce_assign_wr<Reducer, ndim, DType, OP1, OP2>(idx, M, addto, big, lhs, rhs, small,
big_shape, lhs_shape0, rhs_shape0, small_shape, rshape, lhs_shape, rhs_shape, rstride,
lhs_stride, rhs_stride, reducer);
}
}
// Three-input variant: small = Reducer over OP1(big, OP2(lhs, rhs)), with a
// caller-supplied reducer instance.  kNullOp skips; kAddTo accumulates.
template<typename Reducer, int ndim, typename DType, typename OP1, typename OP2>
void ReduceWithReducer(Stream<cpu> *s, const TBlob& small, const OpReqType req,
                       const Tensor<cpu, 1, char>& workspace, const TBlob& big, const TBlob& lhs,
                       const TBlob& rhs, Reducer* reducer) {
  if (req == kNullOp) return;
  // Per-input reduction-window shapes/strides, each relative to `small`.
  Shape<ndim> rshape, rstride;
  Shape<ndim> lhs_shape, lhs_stride;
  Shape<ndim> rhs_shape, rhs_stride;
  diff(small.shape_.get<ndim>(), big.shape_.get<ndim>(), &rshape, &rstride);
  diff(small.shape_.get<ndim>(), lhs.shape_.get<ndim>(), &lhs_shape, &lhs_stride);
  diff(small.shape_.get<ndim>(), rhs.shape_.get<ndim>(), &rhs_shape, &rhs_stride);
  const size_t N = small.shape_.Size();
  const size_t M = rshape.Size();
  seq_reduce_compute_wr<Reducer, ndim, DType, OP1, OP2>(
      N, M, req == kAddTo,
      big.dptr<DType>(), lhs.dptr<DType>(), rhs.dptr<DType>(), small.dptr<DType>(),
      big.shape_.get<ndim>(), small.shape_.get<ndim>(),
      rshape, rstride,
      lhs_shape, lhs_stride,
      rhs_shape, rhs_stride,
      lhs.shape_.get<ndim>(), rhs.shape_.get<ndim>(),
      reducer);
}
#endif
} // namespace broadcast
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_NUMPY_LINALG_BROADCAST_REDUCE_CUSTOMIZED_INL_H_
|
target_array_extension.c | // --------------------------------------------------
// Check extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// END.
#include <stdio.h>
#define BEFORE 0
#define AFTER 1
#define SIZE 100
#if EXTENDS == BEFORE
# define SMALL_BEG (SIZE-2)
# define SMALL_END SIZE
# define LARGE_BEG 0
# define LARGE_END SIZE
#elif EXTENDS == AFTER
# define SMALL_BEG 0
# define SMALL_END 2
# define LARGE_BEG 0
# define LARGE_END SIZE
#else
# error EXTENDS undefined
#endif
#define SMALL_SIZE (SMALL_END-SMALL_BEG)
#define LARGE_SIZE (LARGE_END-LARGE_BEG)
#define SMALL SMALL_BEG:SMALL_SIZE
#define LARGE LARGE_BEG:LARGE_SIZE
int main() {
  int arr[SIZE];
  // `sizeof` expressions have type size_t, so print with %zu: the previous
  // %ld is a format/argument type mismatch (undefined behavior) on targets
  // where long and size_t have different widths (e.g. 64-bit Windows).
  // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", &arr[SMALL_BEG],
          SMALL_SIZE * sizeof arr[0]);
  // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", &arr[LARGE_BEG],
          LARGE_SIZE * sizeof arr[0]);
  // Mapping a subsection of an already-mapped array must succeed: the
  // 'present' requirement is satisfied because arr[SMALL] lies inside
  // the arr[LARGE] mapping, so no runtime diagnostics are expected here.
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
  {
#pragma omp target map(present, tofrom: arr[SMALL])
    ;
  }
  // CHECK: arr is present
  fprintf(stderr, "arr is present\n");
  // Mapping a superset of the existing mapping must fail: arr[LARGE]
  // extends beyond the device allocation created for arr[SMALL], and
  // mandatory offloading turns that into a fatal error.
  // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes)
  // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes)
  // CHECK: Libomptarget error: Call to getTargetPointer returned null pointer ('present' map type modifier).
  // CHECK: Libomptarget error: Call to targetDataBegin failed, abort target.
  // CHECK: Libomptarget error: Failed to process data before launching the kernel.
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target data map(alloc: arr[SMALL])
  {
#pragma omp target map(present, tofrom: arr[LARGE])
    ;
  }
  // The run aborts above, so this message must never appear in the output.
  // CHECK-NOT: arr is present
  fprintf(stderr, "arr is present\n");
  return 0;
}
|
GB_unop__ainv_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__ainv_fp32_fp32)
// op(A') function: GB (_unop_tran__ainv_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = -z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV (additive inverse, i.e. negation)
// unary operator, fp32 -> fp32.  Handles both the full case (Ab == NULL)
// and the bitmap case (Ab != NULL, where Ab[p] flags which entries exist).
// NOTE: this file is auto-generated; edits belong in the template/codegen.
GrB_Info GB (_unop_apply__ainv_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op with no typecast: a parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = -z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast, and apply the AINV operator.
// The transpose loop itself is shared template code textually included from
// GB_unop_transpose.c; it is driven by the GB_* macros defined earlier in
// this file (GB_CAST_OP, GB_ATYPE, GB_CTYPE, ...).
GrB_Info GB (_unop_tran__ainv_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
placement.c | #include <stdlib.h>
#include <stdio.h>
#include "omp.h"
// Repeatedly enter the same statically-scheduled (chunk size 1, i.e. cyclic)
// worksharing loop inside one parallel region.  Every iteration bumps the
// shared counter atomically, so after the region a == reps * N.
int main(void)
{
  const int reps = 1000;
  const int N = 20;
  int a = 0;
#pragma omp parallel
  {
    // Not a parallel for: each rep re-enters the worksharing construct.
    for (int rep = 0; rep < reps; rep++)
    {
#pragma omp for schedule(static, 1)
      for (int i = 0; i < N; i++)
      {
#pragma omp atomic
        a++;
      }
    }
  }
  return 0;
}
spmspv.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Narayanan Sundaram (Intel Corp.), Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_SINGLENODE_SPMSPV_H_
#define SRC_SINGLENODE_SPMSPV_H_
#include <xmmintrin.h>
#include "GMDP/utils/bitvector.h"
// Sparse-matrix (partitioned DCSC) times sparse-vector: y (+)= A * x,
// restricted to columns where x has an entry (xbit_vector).  Partitions own
// disjoint row ranges, so the parallel loop needs no synchronization.
// yvalue/ybit_vector accumulate the result; *nnz receives an upper bound
// (callers recompute the exact count via compute_nnz()).
template <typename Ta, typename Tx, typename Ty>
void my_spmspv(int* row_inds, int* col_ptrs, int* col_indices, Ta* vals,
               int num_partitions, int* row_pointers, int* col_starts,
               int* edge_pointers, Tx* xvalue, int * xbit_vector, Ty* yvalue,
               int * ybit_vector, int m, int n, int* nnz, void (*op_mul)(const Ta&, const Tx&, Ty*, void*),
               void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
#pragma omp parallel for schedule(dynamic, 1)
  for (int p = 0; p < num_partitions; p++) {
    // Per-partition views into the global index/value arrays.
    const int* column_offset = col_indices + col_starts[p];
    const int* partitioned_row_offset = row_inds + edge_pointers[p];
    const Ta* partitioned_val_offset = vals + edge_pointers[p];
    const int* col_ptrs_cur = col_ptrs + col_starts[p];
    const int ncols = (col_starts[p + 1] - col_starts[p]) - 1;
    for (int j = 0; j < ncols; j++) {
      int col_index = column_offset[j];
      if (get_bitvector(col_index, xbit_vector)) {
        Tx Xval = xvalue[col_index];
        // Prefetch the x entry of a future column, but only while the
        // lookahead index stays inside this partition's column list: the
        // previous unconditional column_offset[j + 4] read past the end of
        // the array for the last 4 columns (heap out-of-bounds read).
        if (j + 4 < ncols)
          _mm_prefetch((char*)(xvalue + column_offset[j + 4]), _MM_HINT_T0);
        for (int nz_idx = col_ptrs_cur[j]; nz_idx < col_ptrs_cur[j + 1]; nz_idx++) {
          int row_ind = partitioned_row_offset[nz_idx];
          Ta Aval = partitioned_val_offset[nz_idx];
          Ty temp_mul_result;
          op_mul(Aval, Xval, &temp_mul_result, vsp);
          if (get_bitvector(row_ind, ybit_vector)) {
            // Row already has a value: accumulate in place.
            op_add(yvalue[row_ind], temp_mul_result, &(yvalue[row_ind]), vsp);
          } else {
            yvalue[row_ind] = temp_mul_result;
            set_bitvector(row_ind, ybit_vector);
          }
        }
      }
    }
  }
  // Report an upper bound as before; the exact nnz is recomputed by callers
  // (mult_segment calls segmenty->compute_nnz()).  The old empty loop that
  // summed a never-written new_nnz[] array has been removed as dead code.
  *nnz = m * n;
}
// y = A * x for a CSR matrix whose index arrays appear to be one-based
// (MKL/Fortran style: values and columns are read as a[nz-1] / ja[nz-1]-1
// while ia[] supplies one-based positions) -- TODO confirm against the
// CSRTile builder.  Rows are blocked into num_partitions chunks, rounded to
// multiples of 32 so partitions never share a bitvector word; partitions
// own disjoint rows, making the parallel loop race-free.
template <typename Ta, typename Tx, typename Ty>
void my_csrspmspv(Ta* a, int* ia, int* ja, Tx* xvalue, int * xbit_vector,
                  Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
                  void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  int num_partitions = omp_get_max_threads() * 4;
  int rows_per_partition = (m + num_partitions - 1) / num_partitions;
  // Round up to a multiple of 32 so each partition owns whole bitvector words.
  rows_per_partition = ((rows_per_partition + 31) / 32) * 32;
#pragma omp parallel for schedule(dynamic, 1)
  for (int partition = 0 ; partition < num_partitions ; partition++)
  {
    int start_row = partition * rows_per_partition;
    int end_row = (partition+1) * rows_per_partition;
    if (end_row > m) end_row = m;
    for (int row = start_row ; row < end_row ; row++)
    {
      // Accumulate into a local copy of y[row]; write back once at the end.
      bool row_exists = get_bitvector(row, ybit_vector);
      Ty yval;
      if (row_exists)
      {
        yval = yvalue[row];
      }
      for (int nz = ia[row]; nz < ia[row + 1]; nz++) {
        Ty tmp_mul;
        int col_id = ja[nz-1]-1;  // convert 1-based column index to 0-based
        if (get_bitvector(col_id, xbit_vector))
        {
          op_mul(a[nz - 1], xvalue[col_id], &tmp_mul, vsp);
          if (row_exists)
          {
            Ty tmp_add = yval;
            op_add(tmp_add, tmp_mul, &yval, vsp);
          }
          else
          {
            // First contribution to this row: seed the value and mark it.
            yval = tmp_mul;
            set_bitvector(row, ybit_vector);
            row_exists=true;
          }
        }
      }
      if (row_exists)
      {
        yvalue[row] = yval;
      }
    }
  }
  // nnz is intentionally left untouched here; callers recompute it
  // (see mult_segment -> compute_nnz()).
  //*nnz = m * n;
}
// y = A * x for a DCSR (doubly compressed sparse row) matrix: only rows
// listed in row_ids are stored, and ia/ja are zero-based here.  Partitions
// cover disjoint row ranges, so the parallel loop needs no locking.
template <typename Ta, typename Tx, typename Ty>
void my_dcsrspmspv(Ta* a, int* ia, int* ja, int * row_ids, int num_rows, int * partition_ptrs, int num_partitions,
                   Tx* xvalue, int * xbit_vector,
                   Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
                   void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
#pragma omp parallel for schedule(dynamic, 1)
  for (int p = 0; p < num_partitions; p++) {
    for (int r = partition_ptrs[p]; r < partition_ptrs[p + 1]; r++) {
      const int row = row_ids[r];
      // Work on a local copy of y[row]; mark and write back only at the end.
      bool have_val = get_bitvector(row, ybit_vector);
      Ty acc;
      if (have_val)
        acc = yvalue[row];
      for (int nz = ia[r]; nz < ia[r + 1]; nz++) {
        const int col = ja[nz];
        if (!get_bitvector(col, xbit_vector))
          continue;  // x has no entry in this column
        Ty prod;
        op_mul(a[nz], xvalue[col], &prod, vsp);
        if (have_val) {
          Ty prev = acc;
          op_add(prev, prod, &acc, vsp);
        } else {
          acc = prod;
          have_val = true;
        }
      }
      if (have_val) {
        set_bitvector(row, ybit_vector);
        yvalue[row] = acc;
      }
    }
  }
}
// y = A * x for a COO matrix with one-based ia/ja coordinate arrays.
// partition_starts splits the nonzero list; the partitioning is presumed to
// keep rows disjoint across partitions (same contract as the other kernels)
// so the unsynchronized updates to y are race-free -- TODO confirm builder.
template <typename Ta, typename Tx, typename Ty>
void my_coospmspv(Ta* a, int* ia, int* ja, int num_partitions, int * partition_starts,
                  Tx* xvalue, int * xbit_vector,
                  Ty* yvalue, int * ybit_vector, int m, int n, int* nnz,
                  void (*op_mul)(const Ta&, const Tx&, Ty*, void*), void (*op_add)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
#pragma omp parallel for schedule(dynamic, 1)
  for (int p = 0; p < num_partitions; p++) {
    for (int nz = partition_starts[p]; nz < partition_starts[p + 1]; nz++) {
      const int row = ia[nz] - 1;  // 1-based -> 0-based
      const int col = ja[nz] - 1;
#ifdef __DEBUG
      assert(row < m);
      assert(row >= 0);
      assert(col < n);
      assert(col >= 0);
#endif
      if (!get_bitvector(col, xbit_vector))
        continue;  // x has no entry in this column
      Ty prod;
      op_mul(a[nz], xvalue[col], &prod, vsp);
      if (get_bitvector(row, ybit_vector)) {
        Ty prev = yvalue[row];
        Ty sum;
        op_add(prev, prod, &sum, vsp);
        yvalue[row] = sum;
      } else {
        yvalue[row] = prod;
      }
      set_bitvector(row, ybit_vector);
    }
  }
}
// y = A * x for one DCSC tile: allocate/clear the output segment, run the
// partitioned SpMSpV kernel, then recount the output nonzeros exactly
// (the kernel only writes an upper bound into nnz).
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const DCSCTile<Ta>* tile, DenseSegment<Tx>* segmentx,
                  DenseSegment<Ty>* segmenty,
                  void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  segmenty->alloc();
  segmenty->initialize();
  my_spmspv(tile->row_inds, tile->col_ptrs, tile->col_indices, tile->vals,
            tile->num_partitions, tile->row_pointers, tile->col_starts,
            tile->edge_pointers, segmentx->properties->value, segmentx->properties->bit_vector,
            segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&segmenty->properties->nnz),
            mul_fp, add_fp, vsp);
  // Overwrite the kernel's nnz estimate with the exact count.
  segmenty->properties->nnz = segmenty->compute_nnz();
}
// y = A * x for a hybrid tile: apply the DCSR part (t1) and then the COO
// part (t2) into the same output segment, then recount nnz exactly.
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const HybridTile<Ta>* tile, const DenseSegment<Tx> * segmentx,
                  DenseSegment<Ty>* segmenty,
                  void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  segmenty->alloc();
  segmenty->initialize();
  int nnz = 0;  // scratch; the exact count comes from compute_nnz() below
  if (tile->t1->nnz > 0)
  {
    my_dcsrspmspv(tile->t1->a, tile->t1->ia, tile->t1->ja, tile->t1->row_ids, tile->t1->num_rows, tile->t1->partition_ptrs, tile->t1->num_partitions, segmentx->properties->value, segmentx->properties->bit_vector,
                  segmenty->properties->value, segmenty->properties->bit_vector, tile->t1->m, tile->t1->n, (&nnz),
                  mul_fp, add_fp, vsp);
  }
  if (tile->t2->nnz > 0)
  {
    my_coospmspv(tile->t2->a, tile->t2->ia, tile->t2->ja, tile->t2->num_partitions, tile->t2->partition_start,
                 segmentx->properties->value, segmentx->properties->bit_vector,
                 segmenty->properties->value, segmenty->properties->bit_vector, tile->t2->m, tile->t2->n, (&nnz),
                 mul_fp, add_fp, vsp);
  }
  segmenty->properties->nnz = segmenty->compute_nnz();
}
// y = A * x for a CSR tile: allocate/clear the output segment, run the CSR
// SpMSpV kernel (skipped for an empty tile), then recount nnz exactly.
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const CSRTile<Ta>* tile, const DenseSegment<Tx> * segmentx,
                  DenseSegment<Ty>* segmenty,
                  void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  segmenty->alloc();
  segmenty->initialize();
  int nnz = 0;  // scratch; the exact count comes from compute_nnz() below
  if (tile->nnz > 0)
  {
    my_csrspmspv(tile->a, tile->ia, tile->ja, segmentx->properties->value, segmentx->properties->bit_vector,
                 segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
                 mul_fp, add_fp, vsp);
  }
  segmenty->properties->nnz = segmenty->compute_nnz();
}
// y = A * x for a COO tile: allocate/clear the output segment, run the COO
// SpMSpV kernel (skipped for an empty tile), then recount nnz exactly.
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const COOTile<Ta>* tile, const DenseSegment<Tx>* segmentx,
                  DenseSegment<Ty>* segmenty,
                  void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  segmenty->alloc();
  segmenty->initialize();
  int nnz = 0;  // scratch; the exact count comes from compute_nnz() below
  if (tile->nnz > 0)
  {
    my_coospmspv(tile->a, tile->ia, tile->ja, tile->num_partitions, tile->partition_start,
                 segmentx->properties->value, segmentx->properties->bit_vector,
                 segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
                 mul_fp, add_fp, vsp);
  }
  segmenty->properties->nnz = segmenty->compute_nnz();
}
// y = A * x for a COOSIMD32 tile.  Note it currently reuses the scalar COO
// kernel (my_coospmspv) rather than a SIMD-specialized path.
template <typename Ta, typename Tx, typename Ty>
void mult_segment(const COOSIMD32Tile<Ta>* tile, const DenseSegment<Tx>* segmentx,
                  DenseSegment<Ty>* segmenty,
                  void (*mul_fp)(const Ta&, const Tx&, Ty*, void*), void (*add_fp)(const Ty&, const Ty&, Ty*, void*), void* vsp) {
  segmenty->alloc();
  segmenty->initialize();
  int nnz = 0;  // scratch; the exact count comes from compute_nnz() below
  if (tile->nnz > 0)
  {
    my_coospmspv(tile->a, tile->ia, tile->ja, tile->num_partitions, tile->partition_start,
                 segmentx->properties->value, segmentx->properties->bit_vector,
                 segmenty->properties->value, segmenty->properties->bit_vector, tile->m, tile->n, (&nnz),
                 mul_fp, add_fp, vsp);
  }
  segmenty->properties->nnz = segmenty->compute_nnz();
}
#endif // SRC_SINGLENODE_SPMSPV_H_
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
/*
  AutoOrientImage(): return a copy of `image` transformed so it displays
  correctly in top-left orientation.  The result (when non-NULL) is tagged
  TopLeftOrientation, so the original orientation flag is no longer needed.
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *result;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  switch (orientation)
  {
    case TopRightOrientation:
      result=FlopImage(image,exception);
      break;
    case BottomRightOrientation:
      result=RotateImage(image,180.0,exception);
      break;
    case BottomLeftOrientation:
      result=FlipImage(image,exception);
      break;
    case LeftTopOrientation:
      result=TransposeImage(image,exception);
      break;
    case RightTopOrientation:
      result=RotateImage(image,90.0,exception);
      break;
    case RightBottomOrientation:
      result=TransverseImage(image,exception);
      break;
    case LeftBottomOrientation:
      result=RotateImage(image,270.0,exception);
      break;
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
      /* already upright (or unknown): return an unmodified clone */
      result=CloneImage(image,0,0,MagickTrue,exception);
      break;
  }
  if (result != (Image *) NULL)
    result->orientation=TopLeftOrientation;
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info)
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ChopImage(): remove the chop rectangle and collapse the image around it.
  Rows above and columns outside the region are copied as-is; rows below the
  region are shifted up (and chopped columns skipped) to fill the gap.
  The result is (columns-width) x (rows-height).
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds; a negative offset shrinks
    the region instead of extending it outside the image.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    First pass: copy the rows above the chop region, skipping the chopped
    columns within each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Keep only columns outside [extent.x, extent.x+extent.width). */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; bump it atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Second pass: copy the rows below the chop region, shifted up so they
    land directly after the rows copied in the first pass.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /* Source row sits below the chop region; destination row follows the
       rows already copied above it. */
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Keep only columns outside [extent.x, extent.x+extent.width). */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across OpenMP threads; bump it atomically */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
%   Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.  Each
    group of four images in the list supplies the cyan, magenta, yellow, and
    black planes of one CMYK image; each plane's intensity is inverted
    (QuantumRange-intensity) and stored in the matching channel.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      {
        /*
          Destroy the clone before bailing out so it is not leaked.
        */
        cmyk_image=DestroyImage(cmyk_image);
        break;
      }
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /*
            Invert the plane intensity: full intensity means no ink.
          */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.  The geometry is interpreted relative to the image's
    virtual canvas (its page); when no page is defined, the pixel dimensions
    serve as the canvas.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the requested region into pixel coordinates and trim any part
    that falls outside the image data.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  /*
    If the cropped region extends past the virtual canvas, use the bounding
    box as the result's page instead.
  */
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy the region scanline by scanline; under OpenMP each
    thread processes a static band of rows through shared cache views.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Copy each channel that is defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% The format of the CropImageToTiles method is:
%
%   Image *CropImageToTiles(const Image *image,
%     const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainPixelOffset(double x)
{
  /*
    Clamp x to a range that converts safely to a signed long; the bound of
    LONG_MAX-512 leaves headroom so subsequent rounding cannot overflow.
  */
  const double
    limit = (double) (LONG_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round x to the nearest integer; exact halves round up via ceil().
  */
  double
    down_distance,
    up_distance;

  down_distance=x-floor(x);
  up_distance=ceil(x)-x;
  if (down_distance < up_distance)
    return((ssize_t) floor(ConstrainPixelOffset(x)));
  return((ssize_t) ceil(ConstrainPixelOffset(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  /*
    Parse the crop geometry string and dispatch on its flags: '@' tiles the
    image into NxM pieces; an explicit +X+Y (or empty size) crops a single
    region; otherwise the image is cut into fixed WxH tiles.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).  geometry.width x geometry.height is
        the tile count; delta is the (possibly fractional) tile size.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, covering the virtual canvas.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    The image already fits within the requested tile size: return a clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  The geometry is applied verbatim; no clamping
    against the virtual canvas is performed here.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Copy each channel that is defined in both source and excerpt image.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Allocate the extended canvas: a clone of the source sized to the
    requested geometry, filled with the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageBackgroundColor(extent_image,exception) == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /*
    Composite the source onto the canvas at the negated geometry offset and,
    on success, update the 8BIM clip path to match the new coordinates.
  */
  if (CompositeImage(extent_image,image,image->compose,MagickTrue,
      -geometry->x,-geometry->y,exception) != MagickFalse)
    Update8BIMClipPath(extent_image,image->columns,image->rows,geometry);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy each source row y into destination row rows-y-1.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Copy each channel that is defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the page offset about the virtual canvas as well.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: read the source row left-to-right while writing the
    destination row right-to-left.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Position q one pixel past the end of the row; it is decremented
      before each pixel is written.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the page offset about the virtual canvas as well.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns-by-rows block of pixels from the source
  image (starting at sx,sy) into the destination image (starting at dx,dy).
  Returns MagickFalse if any scanline could not be read or written.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);  /* empty region: nothing to copy */
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      /*
        Copy each channel that is defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the offsets into [0,columns) x [0,rows).
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the result is assembled from four rectangular regions of
    the source, each wrapped around to the opposite edge.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop_info;

  /*
    Shaving removes shave_info->width columns from the left and right edges
    and shave_info->height rows from the top and bottom edges; it is
    implemented as a centered crop.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&crop_info);
  crop_info.width-=2*shave_info->width;
  crop_info.height-=2*shave_info->height;
  crop_info.x=(ssize_t) shave_info->width+image->page.x;
  crop_info.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&crop_info,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the page geometry to account for the removed border.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag  "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    columns,
    y;

  /*
    Allocate splice image: the result is the source image enlarged by the
    geometry's width/height, with a background-colored band spliced in at
    the gravity-adjusted (x,y) offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
    {
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
      (IsGrayColorspace(splice_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(splice_image,sRGBColorspace,exception);
  if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (splice_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
  (void) SetImageBackgroundColor(splice_image,exception);
  /*
    Respect image geometry: translate the splice offset by the image's
    gravity setting.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: the vertical offset must use the splice height, not the
        width (was splice_geometry.width/2).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image: copy the rows above the splice band, skipping the spliced
    columns (which keep the background color).
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Columns left of the splice band.
    */
    for (x=0; x < columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /*
      Skip the spliced columns (background retained).
    */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    /*
      Columns right of the splice band.
    */
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Copy the rows below the splice band; the source row is offset by the
    splice height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    if ((y < 0) || (y >= (ssize_t) splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  /*
    Optionally crop *image to crop_geometry, then optionally resize it to
    image_geometry.  On success *image points at the transformed image and
    MagickTrue is returned; on failure *image is left at the most recent
    valid image and MagickFalse is returned.
  */
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      /*
        Bug fix: if both the crop and the fallback clone failed,
        transform_image is NULL; bail out instead of storing NULL in *image
        and dereferencing it below.
      */
      if (transform_image == (Image *) NULL)
        return(MagickFalse);
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* The result swaps dimensions: columns x rows becomes rows x columns. */
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* Read source row (rows-y-1) and write it as destination column
(rows-y-1) of height transpose_image->rows (== image->columns). */
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Copy every channel shared by the source and destination images. */
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transpose_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transpose_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(transpose_image);
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
/* Swap the virtual canvas geometry to follow the transposed axes. */
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* The result swaps dimensions: columns x rows becomes rows x columns. */
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
/* Read source row y; write destination column (rows-y-1), filling it
from the bottom up (q is advanced to the end, then walked backward). */
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
0,1,transverse_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
q+=GetPixelChannels(transverse_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
q-=GetPixelChannels(transverse_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transverse_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transverse_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
/* Swap the virtual canvas geometry and mirror the page offsets so the
canvas placement matches the reflected/rotated pixels. */
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *trim_image;

  RectangleInfo
    bounds;

  /*
    Crop the image to its bounding box, discarding the uniform border.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      trim_image=CropImage(image,&bounds,exception);
      if (trim_image != (Image *) NULL)
        Update8BIMClipPath(trim_image,image->columns,image->rows,&bounds);
      return(trim_image);
    }
  /*
    The image is entirely background: return a 1x1 transparent stand-in
    with a sentinel page offset of (-1,-1).
  */
  trim_image=CloneImage(image,1,1,MagickTrue,exception);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  trim_image->background_color.alpha_trait=BlendPixelTrait;
  trim_image->background_color.alpha=(MagickRealType) TransparentAlpha;
  (void) SetImageBackgroundColor(trim_image,exception);
  trim_image->page=image->page;
  trim_image->page.x=(-1);
  trim_image->page.y=(-1);
  return(trim_image);
}
|
trsm_x_dia_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
/*
 * Triangular solve with multiple right-hand sides (TRSM) for a DIA-format
 * sparse matrix: solves A * y = alpha * x where A is taken as
 * lower-triangular with an explicit (non-unit) main diagonal, and x/y are
 * dense row-major matrices with `columns` columns and leading dimensions
 * ldx/ldy.  Forward substitution proceeds row-by-row within each column;
 * columns are independent and solved in parallel.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
ALPHA_INT main_diag_pos = 0;
/* NOTE(review): VLA of m elements on the stack -- may overflow for very
   large matrices; consider heap allocation.  Also assumes the main
   diagonal is present; if A->distance never contains 0, diag stays zero
   and the alpha_div below divides by zero -- TODO confirm caller
   guarantees a stored main diagonal. */
ALPHA_Number diag[m];
memset(diag, '\0', m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
/* Locate the main diagonal (distance == 0) and copy it out; at most one
   iteration matches, so the parallel loop performs no conflicting writes. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT i = 0; i < A->ndiag; i++)
{
if(A->distance[i] == 0)
{
main_diag_pos = i;
for (ALPHA_INT r = 0; r < A->rows; r++)
{
diag[r] = A->values[i * A->lval + r];
}
}
}
/* Forward substitution, parallel over right-hand-side columns.  Rows must
   be processed in increasing order because row r reads already-solved
   y entries from rows above it. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = 0; r < m; r++)
{
ALPHA_Number temp;
alpha_setzero(temp);
/* Accumulate L(r,c)*y(c) over the strictly-lower diagonals (those stored
   before the main diagonal; distance < 0 means column r + distance). */
for (ALPHA_INT ndiag = 0; ndiag < main_diag_pos; ndiag++)
{
if (-A->distance[ndiag] <= r)
{
ALPHA_INT ac = r + A->distance[ndiag];
alpha_madde(temp, A->values[ndiag * A->lval + r], y[ac * ldy + out_y_col]);
}
}
/* y(r) = (alpha*x(r) - sum) / diag(r) */
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sub(t, t, temp);
alpha_div(y[r * ldy + out_y_col], t, diag[r]);
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
GB_binop__land_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_fp32
// A.*B function (eWiseMult): GB_AemultB__land_fp32
// A*D function (colscale): GB_AxD__land_fp32
// D*A function (rowscale): GB_DxB__land_fp32
// C+=B function (dense accum): GB_Cdense_accumB__land_fp32
// C+=b function (dense accum): GB_Cdense_accumb__land_fp32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_fp32
// C=scalar+B GB_bind1st__land_fp32
// C=scalar+B' GB_bind1st_tran__land_fp32
// C=A+scalar GB_bind2nd__land_fp32
// C=A'+scalar GB_bind2nd_tran__land_fp32
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_FP32 || GxB_NO_LAND_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): computes C = A LAND B for
// dense fp32 matrices; the included template does the actual work using the
// GB_BINOP macro defined above.
GrB_Info GB_Cdense_ewise3_noaccum__land_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): the slice arrays partition B's
// entries across ntasks tasks for the included template.
GrB_Info GB_Cdense_accumB__land_fp32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand).
GrB_Info GB_Cdense_accumb__land_fp32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the return inside the braces above always
// executes first; harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): scales each column j of A by
// D(j,j) using the LAND operator; template reads the slice arrays.
GrB_Info GB_AxD__land_fp32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): scales each row i of B by
// D(i,i) using the LAND operator.
GrB_Info GB_DxB__land_fp32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *GB_RESTRICT Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Frees the ek_slice workspaces allocated by the add template.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// Auto-generated kernel (do not edit by hand): element-wise "add" (set
// union) with the LAND operator, optionally masked by M.
GrB_Info GB_AaddB__land_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): element-wise "multiply" (set
// intersection) with the LAND operator, optionally masked by M.
GrB_Info GB_AemultB__land_fp32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Auto-generated kernel (do not edit by hand): Cx[p] = (x && Bx[p]) for all
// entries present per the bitmap Bb (GBB is true when Bb is NULL, i.e. the
// matrix is not bitmap).
GrB_Info GB_bind1st__land_fp32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = land (Ax [p], y) for all entries p present in A: applies the
// logical-AND operator with the scalar y bound to the second argument.
// Cx and Ax may be aliased; each position is read before it is written,
// so aliasing is safe.
GrB_Info GB_bind2nd__land_fp32
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    float *Cz = (float *) Cx_output ;
    float *Avals = (float *) Ax_input ;
    float yscalar = (*((float *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only touch positions present in A (Ab is the bitmap, if any)
        if (GBB (Ab, p))
        {
            float aval = Avals [p] ;
            Cz [p] = ((aval != 0) && (yscalar != 0)) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = land (x, A'): transpose A and apply the LAND operator with the scalar
// x bound to the first argument.  The transpose loop itself comes from the
// shared template GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__land_fp32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any subsequent template expansion in this file
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c, included below.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = land (A', y): transpose A and apply the LAND operator with the scalar
// y bound to the second argument, via the shared GB_unop_transpose.c template.
GrB_Info GB_bind2nd_tran__land_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_AxB_saxpy3_slice_balanced.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: construct balanced tasks for GB_AxB_saxpy3
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If the mask is present but must be discarded, this function returns
// GrB_NO_VALUE, to indicate that the analysis was terminated early.
#include "GB_AxB_saxpy3.h"
// control parameters for generating parallel tasks
// # of tasks to create per thread (coarse + fine), for load balance
#define GB_NTASKS_PER_THREAD 2
// a vector or task is "costly" if its flops exceed this multiple of the target
#define GB_COSTLY 1.2
// a fine task targets 1/GB_FINE_WORK of the coarse target task size
#define GB_FINE_WORK 2
// mask thresholds: discard M if A*B work < alpha * mask work; keep a packed
// mask in place if A*B work < beta * mask work
#define GB_MWORK_ALPHA 0.01
#define GB_MWORK_BETA 0.10
// pop all workspace from the Werk stack (reverse order of the pushes below)
#define GB_FREE_WORK \
{ \
GB_WERK_POP (Fine_fl, int64_t) ; \
GB_WERK_POP (Fine_slice, int64_t) ; \
GB_WERK_POP (Coarse_Work, int64_t) ; \
GB_WERK_POP (Coarse_initial, int64_t) ; \
}
// free workspace and the output task list (used on error paths)
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE_WERK (&SaxpyTasks, SaxpyTasks_size) ; \
}
//------------------------------------------------------------------------------
// GB_hash_table_size
//------------------------------------------------------------------------------
// flmax is the max flop count for computing A*B(:,j), for any vector j that
// this task computes. If the mask M is present, flmax also includes the
// number of entries in M(:,j). GB_hash_table_size determines the hash table
// size for this task, which is twice the smallest power of 2 larger than
// flmax. If flmax is large enough, the hash_size is returned as cvlen, so
// that Gustavson's method will be used instead of the Hash method.
// By default, Gustavson vs Hash is selected automatically. AxB_method can be
// selected via the descriptor or a global setting, as the non-default
// GxB_AxB_GUSTAVSON or GxB_AxB_HASH settings, to enforce the selection of
// either of those methods. However, if Hash is selected but the hash table
// equals or exceeds cvlen, then Gustavson's method is used instead.
// GB_hash_table_size: pick the hash table size for one saxpy3 task.
// Returns either twice the smallest power of 2 larger than flmax (the Hash
// method) or cvlen itself (which selects Gustavson's method for this task).
// flmax is the max flop count for computing A*B(:,j) over the vectors j this
// task handles (including nnz(M(:,j)) if the mask is present).  Even when
// GxB_AxB_HASH is requested, Gustavson is used if the table would reach cvlen.
static inline int64_t GB_hash_table_size
(
    int64_t flmax,      // max flop count for any vector computed by this task
    int64_t cvlen,      // vector length of C
    const GrB_Desc_Value AxB_method     // Default, Gustavson, or Hash
)
{
    // Gustavson requested explicitly, or flmax too large for hashing to pay
    if (AxB_method == GxB_AxB_GUSTAVSON || flmax >= cvlen/2)
    {
        return (cvlen) ;
    }

    // candidate hash table size: 2 * (smallest power of 2 >= flmax)
    int64_t hsize = ((uint64_t) 2) << (GB_FLOOR_LOG2 (flmax) + 1) ;

    // Hash requested: only revert to Gustavson if the table reaches cvlen.
    // Default (auto): revert to Gustavson once the table reaches cvlen/12.
    int64_t gustavson_cutoff = (AxB_method == GxB_AxB_HASH) ? cvlen : cvlen/12 ;

    return ((hsize >= gustavson_cutoff) ? cvlen : hsize) ;
}
//------------------------------------------------------------------------------
// GB_create_coarse_task: create a single coarse task
//------------------------------------------------------------------------------
// Compute the max flop count for any vector in a coarse task, determine the
// hash table size, and construct the coarse task.
// GB_create_coarse_task: fill in SaxpyTasks [taskid] as a coarse task that
// computes C(:,kfirst:klast) = A*B(:,kfirst:klast).  The max per-vector flop
// count is found with a parallel reduction and used to size the hash table.
static inline void GB_create_coarse_task
(
int64_t kfirst, // coarse task consists of vectors kfirst:klast
int64_t klast,
GB_saxpy3task_struct *SaxpyTasks,
int taskid, // taskid for this coarse task
int64_t *Bflops, // size bnvec; cum sum of flop counts for vectors of B
int64_t cvlen, // vector length of B and C
double chunk,
int nthreads_max,
int64_t *Coarse_Work, // workspace for parallel reduction for flop count
const GrB_Desc_Value AxB_method // Default, Gustavson, or Hash
)
{
//--------------------------------------------------------------------------
// find the max # of flops for any vector in this task
//--------------------------------------------------------------------------
int64_t nk = klast - kfirst + 1 ;
int nth = GB_nthreads (nk, chunk, nthreads_max) ;
// each thread finds the max flop count for a subset of the vectors
int tid ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (tid = 0 ; tid < nth ; tid++)
{
// flop count of vector kk is the difference of the cumulative sum Bflops
int64_t my_flmax = 1, istart, iend ;
GB_PARTITION (istart, iend, nk, tid, nth) ;
for (int64_t i = istart ; i < iend ; i++)
{
int64_t kk = kfirst + i ;
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
my_flmax = GB_IMAX (my_flmax, fl) ;
}
Coarse_Work [tid] = my_flmax ;
}
// combine results from each thread (serial reduction over nth partial maxima)
int64_t flmax = 1 ;
for (tid = 0 ; tid < nth ; tid++)
{
flmax = GB_IMAX (flmax, Coarse_Work [tid]) ;
}
// check the parallel computation
#ifdef GB_DEBUG
int64_t flmax2 = 1 ;
for (int64_t kk = kfirst ; kk <= klast ; kk++)
{
int64_t fl = Bflops [kk+1] - Bflops [kk] ;
flmax2 = GB_IMAX (flmax2, fl) ;
}
ASSERT (flmax == flmax2) ;
#endif
//--------------------------------------------------------------------------
// define the coarse task
//--------------------------------------------------------------------------
SaxpyTasks [taskid].start = kfirst ;
SaxpyTasks [taskid].end = klast ;
SaxpyTasks [taskid].vector = -1 ; // -1 marks a coarse task
SaxpyTasks [taskid].hsize = GB_hash_table_size (flmax, cvlen, AxB_method) ;
SaxpyTasks [taskid].Hi = NULL ; // assigned later
SaxpyTasks [taskid].Hf = NULL ; // assigned later
SaxpyTasks [taskid].Hx = NULL ; // assigned later
SaxpyTasks [taskid].my_cjnz = 0 ; // for fine tasks only
SaxpyTasks [taskid].leader = taskid ; // a coarse task is its own team of 1
SaxpyTasks [taskid].team_size = 1 ;
}
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: create balanced tasks for saxpy3
//------------------------------------------------------------------------------
// GB_AxB_saxpy3_slice_balanced: decide how to use the mask M, then partition
// the work for C=A*B into a balanced list of coarse tasks (one or more whole
// vectors of B) and fine tasks (a slice of the entries of one costly vector
// of B).  On success the task list is returned via SaxpyTasks_handle; if the
// mask is present but too costly to use, GrB_NO_VALUE is returned and the
// caller applies M after C=A*B is computed.
GrB_Info GB_AxB_saxpy3_slice_balanced
(
// inputs
GrB_Matrix C, // output matrix
const GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
GrB_Desc_Value AxB_method, // Default, Gustavson, or Hash
// outputs
GB_saxpy3task_struct **SaxpyTasks_handle,
size_t *SaxpyTasks_size_handle,
bool *apply_mask, // if true, apply M during saxpy3
bool *M_packed_in_place, // if true, use M in-place
int *ntasks, // # of tasks created (coarse and fine)
int *nfine, // # of fine tasks created
int *nthreads, // # of threads to use
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
// initialize all outputs in case of early return
(*apply_mask) = false ;
(*M_packed_in_place) = false ;
(*ntasks) = 0 ;
(*nfine) = 0 ;
(*nthreads) = 0 ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (M)) ;
ASSERT (GB_JUMBLED_OK (M)) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT_MATRIX_OK (A, "A for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT (GB_JUMBLED_OK (A)) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT_MATRIX_OK (B, "B for saxpy3_slice_balanced A*B", GB0) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT (GB_JUMBLED_OK (B)) ;
ASSERT (!GB_ZOMBIES (B)) ;
//--------------------------------------------------------------------------
// determine the # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// define result and workspace
//--------------------------------------------------------------------------
GB_saxpy3task_struct *restrict SaxpyTasks = NULL ;
size_t SaxpyTasks_size = 0 ;
GB_WERK_DECLARE (Coarse_initial, int64_t) ; // initial coarse tasks
GB_WERK_DECLARE (Coarse_Work, int64_t) ; // workspace for flop counts
GB_WERK_DECLARE (Fine_slice, int64_t) ;
GB_WERK_DECLARE (Fine_fl, int64_t) ; // size max(nnz(B(:,j)))
//--------------------------------------------------------------------------
// get A, and B
//--------------------------------------------------------------------------
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t avlen = A->vlen ;
const int64_t anvec = A->nvec ;
const bool A_is_hyper = GB_IS_HYPERSPARSE (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int8_t *restrict Bb = B->b ;
const int64_t *restrict Bi = B->i ;
const int64_t bvdim = B->vdim ;
const int64_t bnz = GB_NNZ_HELD (B) ;
const int64_t bnvec = B->nvec ;
const int64_t bvlen = B->vlen ;
const bool B_is_hyper = GB_IS_HYPERSPARSE (B) ;
int64_t cvlen = avlen ;
int64_t cvdim = bvdim ;
//--------------------------------------------------------------------------
// compute flop counts for each vector of B and C
//--------------------------------------------------------------------------
int64_t Mwork = 0 ;
int64_t *restrict Bflops = C->p ; // use C->p as workspace for Bflops
GB_OK (GB_AxB_saxpy3_flopcount (&Mwork, Bflops, M, Mask_comp, A, B,
Context)) ;
// Bflops is a cumulative sum: Bflops [k+1] - Bflops [k] is the flop count
// for vector k of B
int64_t total_flops = Bflops [bnvec] ;
double axbflops = total_flops - Mwork ;
GBURBLE ("axbwork %g ", axbflops) ;
if (Mwork > 0) GBURBLE ("mwork %g ", (double) Mwork) ;
//--------------------------------------------------------------------------
// determine if the mask M should be applied, or done later
//--------------------------------------------------------------------------
if (M == NULL)
{
//----------------------------------------------------------------------
// M is not present
//----------------------------------------------------------------------
(*apply_mask) = false ;
}
else if (GB_is_packed (M))
{
//----------------------------------------------------------------------
// M is present and full, bitmap, or sparse/hyper with all entries
//----------------------------------------------------------------------
// Choose all-hash or all-Gustavson tasks, and apply M during saxpy3.
(*apply_mask) = true ;
// The work for M has not yet been added Bflops.
// Each vector M(:,j) has cvlen entries.
Mwork = cvlen * cvdim ;
if (!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON))
{
if (axbflops < (double) Mwork * GB_MWORK_BETA)
{
// The mask is too costly to scatter into the Hf workspace.
// Leave it in place and use all-hash tasks.
AxB_method = GxB_AxB_HASH ;
}
else
{
// Scatter M into Hf and use all-Gustavson tasks.
AxB_method = GxB_AxB_GUSTAVSON ;
}
}
if (AxB_method == GxB_AxB_HASH)
{
// Use the hash method for all tasks (except for those tasks which
// require a hash table size >= cvlen; those tasks use Gustavson).
// Do not scatter the mask into the Hf hash workspace. The work
// for the mask is not accounted for in Bflops, so the hash tables
// can be small.
(*M_packed_in_place) = true ;
GBURBLE ("(use packed mask in-place) ") ;
}
else
{
// Use the Gustavson method for all tasks, and scatter M into the
// fine Gustavson workspace. The work for M is not yet in the
// Bflops cumulative sum. Add it now.
// NOTE(review): the ASSERT below is missing its trailing ';'.
// Harmless when ASSERT expands to nothing, but this would not
// compile in a GB_DEBUG build -- confirm against GB_DEBUG builds.
ASSERT (AxB_method == GxB_AxB_GUSTAVSON)
int nth = GB_nthreads (bnvec, chunk, nthreads_max) ;
int64_t kk ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (kk = 0 ; kk <= bnvec ; kk++)
{
// add cvlen flops per vector to the cumulative sum: entry kk of
// the cumulative sum covers vectors 0..kk-1, plus one for the base
Bflops [kk] += cvlen * (kk+1) ;
}
total_flops = Bflops [bnvec] ;
GBURBLE ("(use packed mask) ") ;
}
}
else if (axbflops < ((double) Mwork * GB_MWORK_ALPHA))
{
//----------------------------------------------------------------------
// M is costly to use; apply it after C=A*B
//----------------------------------------------------------------------
// Do not use M during the computation of A*B. Instead, compute C=A*B
// and then apply the mask later. Tell the caller that the mask should
// not be applied, so that it will be applied later in GB_mxm.
(*apply_mask) = false ;
GBURBLE ("(discard mask) ") ;
GB_FREE_ALL ;
return (GrB_NO_VALUE) ;
}
else
{
//----------------------------------------------------------------------
// use M during saxpy3
//----------------------------------------------------------------------
(*apply_mask) = true ;
GBURBLE ("(use mask) ") ;
}
//--------------------------------------------------------------------------
// determine # of threads and # of initial coarse tasks
//--------------------------------------------------------------------------
(*nthreads) = GB_nthreads ((double) total_flops, chunk, nthreads_max) ;
int ntasks_initial = ((*nthreads) == 1) ? 1 :
(GB_NTASKS_PER_THREAD * (*nthreads)) ;
//--------------------------------------------------------------------------
// give preference to Gustavson when using few threads
//--------------------------------------------------------------------------
if ((*nthreads) <= 8 &&
(!(AxB_method == GxB_AxB_HASH || AxB_method == GxB_AxB_GUSTAVSON)))
{
// Unless a specific method has been explicitly requested, see if
// Gustavson should be used with a small number of threads.
// Matrix-vector has a maximum intensity of 1, so this heuristic only
// applies to GrB_mxm.
double abnz = GB_NNZ (A) + GB_NNZ (B) + 1 ;
double workspace = (double) ntasks_initial * (double) cvlen ;
double intensity = total_flops / abnz ;
GBURBLE ("(intensity: %0.3g workspace/(nnz(A)+nnz(B)): %0.3g",
intensity, workspace / abnz) ;
if (intensity >= 8 && workspace < abnz)
{
// work intensity is large, and Gustavson workspace is modest;
// use Gustavson for all tasks
// NOTE(review): the burble message below misspells "Gustavson";
// left unchanged here since it is a runtime string.
AxB_method = GxB_AxB_GUSTAVSON ;
GBURBLE (": select Gustvason) ") ;
}
else
{
// use default task creation: mix of Hash and Gustavson
GBURBLE (") ") ;
}
}
//--------------------------------------------------------------------------
// determine target task size
//--------------------------------------------------------------------------
double target_task_size = ((double) total_flops) / ntasks_initial ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
double target_fine_size = target_task_size / GB_FINE_WORK ;
target_fine_size = GB_IMAX (target_fine_size, chunk) ;
//--------------------------------------------------------------------------
// determine # of parallel tasks
//--------------------------------------------------------------------------
int ncoarse = 0 ; // # of coarse tasks
int max_bjnz = 0 ; // max (nnz (B (:,j))) of fine tasks
// FUTURE: also use ultra-fine tasks that compute A(i1:i2,k)*B(k,j)
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// construct initial coarse tasks
//----------------------------------------------------------------------
GB_WERK_PUSH (Coarse_initial, ntasks_initial + 1, int64_t) ;
if (Coarse_initial == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (Coarse_initial, Bflops, bnvec, ntasks_initial, true) ;
//----------------------------------------------------------------------
// split the work into coarse and fine tasks
//----------------------------------------------------------------------
// First pass: only COUNT the coarse and fine tasks, so the task list
// can be allocated; the second pass below actually creates the tasks.
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
// where j == GBH (Bh, kk)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// vectors kcoarse_start to kk-1 form a single
// coarse task
ncoarse++ ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// vectors kk will be split into multiple fine tasks
max_bjnz = GB_IMAX (max_bjnz, bjnz) ;
int team_size = ceil (jflops / target_fine_size) ;
(*nfine) += team_size ;
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// vectors kcoarse_start to klast-1 form a single
// coarse task
ncoarse++ ;
}
}
else
{
// This coarse task is OK as-is.
ncoarse++ ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
if (bnvec == 1)
{
// If B is a single vector, and is computed by a single thread,
// then a single fine task is used.
(*nfine) = 1 ;
ncoarse = 0 ;
}
else
{
// One thread uses a single coarse task if B is not a vector.
(*nfine) = 0 ;
ncoarse = 1 ;
}
}
(*ntasks) = ncoarse + (*nfine) ;
//--------------------------------------------------------------------------
// allocate the tasks, and workspace to construct fine tasks
//--------------------------------------------------------------------------
SaxpyTasks = GB_MALLOC_WERK ((*ntasks), GB_saxpy3task_struct,
&SaxpyTasks_size) ;
GB_WERK_PUSH (Coarse_Work, nthreads_max, int64_t) ;
if (max_bjnz > 0)
{
// also allocate workspace to construct fine tasks
GB_WERK_PUSH (Fine_slice, (*ntasks)+1, int64_t) ;
// Fine_fl will only fit on the Werk stack if max_bjnz is small,
// but try anyway, in case it fits. It is placed at the top of the
// Werk stack.
GB_WERK_PUSH (Fine_fl, max_bjnz+1, int64_t) ;
}
if (SaxpyTasks == NULL || Coarse_Work == NULL ||
(max_bjnz > 0 && (Fine_slice == NULL || Fine_fl == NULL)))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
// clear SaxpyTasks
memset (SaxpyTasks, 0, SaxpyTasks_size) ;
//--------------------------------------------------------------------------
// create the tasks
//--------------------------------------------------------------------------
if (ntasks_initial > 1)
{
//----------------------------------------------------------------------
// create the coarse and fine tasks
//----------------------------------------------------------------------
// Second pass: same traversal as the counting pass above, but now the
// tasks are written into the allocated SaxpyTasks array.
int nf = 0 ; // fine tasks have task id 0:nfine-1
int nc = (*nfine) ; // coarse task ids are nfine:ntasks-1
for (int taskid = 0 ; taskid < ntasks_initial ; taskid++)
{
// get the initial coarse task
int64_t kfirst = Coarse_initial [taskid] ;
int64_t klast = Coarse_initial [taskid+1] ;
int64_t task_ncols = klast - kfirst ;
int64_t task_flops = Bflops [klast] - Bflops [kfirst] ;
if (task_ncols == 0)
{
// This coarse task is empty, having been squeezed out by
// costly vectors in adjacent coarse tasks.
}
else if (task_flops > 2 * GB_COSTLY * target_task_size)
{
// This coarse task is too costly, because it contains one or
// more costly vectors. Split its vectors into a mixture of
// coarse and fine tasks.
int64_t kcoarse_start = kfirst ;
for (int64_t kk = kfirst ; kk < klast ; kk++)
{
// jflops = # of flops to compute a single vector A*B(:,j)
double jflops = Bflops [kk+1] - Bflops [kk] ;
// bjnz = nnz (B (:,j))
int64_t bjnz = (Bp == NULL) ? bvlen : (Bp [kk+1] - Bp [kk]);
if (jflops > GB_COSTLY * target_task_size && bjnz > 1)
{
// A*B(:,j) is costly; split it into 2 or more fine
// tasks. First flush the prior coarse task, if any.
if (kcoarse_start < kk)
{
// kcoarse_start:kk-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, kk-1,
SaxpyTasks, nc++, Bflops, cvlen, chunk,
nthreads_max, Coarse_Work, AxB_method) ;
}
// next coarse task (if any) starts at kk+1
kcoarse_start = kk+1 ;
// count the work for each entry B(k,j). Do not
// include the work to scan M(:,j), since that will
// be evenly divided between all tasks in this team.
int64_t pB_start = GBP (Bp, kk, bvlen) ;
int nth = GB_nthreads (bjnz, chunk, nthreads_max) ;
int64_t s ;
#pragma omp parallel for num_threads(nth) \
schedule(static)
for (s = 0 ; s < bjnz ; s++)
{
// get B(k,j)
Fine_fl [s] = 1 ;
int64_t pB = pB_start + s ;
if (!GBB (Bb, pB)) continue ;
int64_t k = GBI (Bi, pB, bvlen) ;
// fl = flop count for just A(:,k)*B(k,j)
int64_t pA, pA_end ;
int64_t pleft = 0 ;
GB_lookup (A_is_hyper, Ah, Ap, avlen, &pleft,
anvec-1, k, &pA, &pA_end) ;
int64_t fl = pA_end - pA ;
Fine_fl [s] = fl ;
ASSERT (fl >= 0) ;
}
// cumulative sum of flops to compute A*B(:,j)
GB_cumsum (Fine_fl, bjnz, NULL, nth, Context) ;
// slice B(:,j) into fine tasks
int team_size = ceil (jflops / target_fine_size) ;
ASSERT (Fine_slice != NULL) ;
GB_pslice (Fine_slice, Fine_fl, bjnz, team_size, false);
// shared hash table for all fine tasks for A*B(:,j)
int64_t hsize =
GB_hash_table_size (jflops, cvlen, AxB_method) ;
// construct the fine tasks for C(:,j)=A*B(:,j)
int leader = nf ;
for (int fid = 0 ; fid < team_size ; fid++)
{
int64_t pstart = Fine_slice [fid] ;
int64_t pend = Fine_slice [fid+1] ;
int64_t fl = Fine_fl [pend] - Fine_fl [pstart] ;
SaxpyTasks [nf].start = pB_start + pstart ;
SaxpyTasks [nf].end = pB_start + pend - 1 ;
SaxpyTasks [nf].vector = kk ;
SaxpyTasks [nf].hsize = hsize ;
SaxpyTasks [nf].Hi = NULL ; // assigned later
SaxpyTasks [nf].Hf = NULL ; // assigned later
SaxpyTasks [nf].Hx = NULL ; // assigned later
SaxpyTasks [nf].my_cjnz = 0 ;
SaxpyTasks [nf].leader = leader ;
SaxpyTasks [nf].team_size = team_size ;
nf++ ;
}
}
}
// flush the last coarse task, if any
if (kcoarse_start < klast)
{
// kcoarse_start:klast-1 form a single coarse task
GB_create_coarse_task (kcoarse_start, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
else
{
// This coarse task is OK as-is.
GB_create_coarse_task (kfirst, klast-1, SaxpyTasks,
nc++, Bflops, cvlen, chunk, nthreads_max,
Coarse_Work, AxB_method) ;
}
}
}
else
{
//----------------------------------------------------------------------
// entire computation in a single fine or coarse task
//----------------------------------------------------------------------
// create a single coarse task: hash or Gustavson
GB_create_coarse_task (0, bnvec-1, SaxpyTasks, 0, Bflops, cvlen, 1, 1,
Coarse_Work, AxB_method) ;
if (bnvec == 1)
{
// convert the single coarse task into a single fine task
SaxpyTasks [0].start = 0 ; // first entry in B(:,0)
SaxpyTasks [0].end = bnz - 1 ; // last entry in B(:,0)
SaxpyTasks [0].vector = 0 ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*SaxpyTasks_handle) = SaxpyTasks ;
(*SaxpyTasks_size_handle) = SaxpyTasks_size ;
return (GrB_SUCCESS) ;
}
|
GB_AxB_saxpy_parallel.c | //------------------------------------------------------------------------------
// GB_AxB_saxpy_parallel: C<M>=A*B, C=A*B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Parallel matrix-matrix multiply, A*B with optional mask M, using the saxpy
// method. This method is used by GrB_mxm, GrB_vxm, and GrB_mxv. For both of
// the latter two methods, B on input will be an nrows-by-1 column vector.
// The strategy is to "slice" (or partition) B, as B = [B0 B1 ... B(t-1)] if
// there are t threads. Then each thread k computes C(k) = A*B(k), and then
// the result is concatenated, as C = [C0 C1 ... C(t-1)].
// Each thread k computes an independent output matrix C(k), doing both its
// analysis and numeric phases.
// This strategy works well for OpenMP, but it could also be written in a
// purely inspector+executor style, like the GB_AxB_dot* methods. Those
// methods do the analysis in parallel, and first determine the size of the
// output matrix C. Then a parallel cumulative sum is computed, and the entire
// output matrix is allocated. Then each task of the numeric phase
// computes its part of the result C, without the need for any memory
// allocation by individual threads.
// This function, and the matrices C, M, A, and B are all CSR/CSC agnostic.
// For this discussion, suppose they are CSC, with vlen = # of rows, and vdim =
// # of columns.
// A*B is being computed, and the vector dimension of A must be identical to
// the vector length of B (as if both A and B are CSC matrices, and the number
// of columns of A is the same as the number of rows of B).
// The output matrix C = *Chandle has not been allocated, so C is NULL on
// input. The mask M is optional.
// The semiring defines C=A*B. flipxy modifies how the semiring multiply
// operator is applied. If false, then fmult(aik,bkj) is computed. If true,
// then the operands are swapped, and fmult(bkj,aij) is done instead.
// AxB_method selects the method to use:
// GxB_DEFAULT: the method is selected automatically
// GxB_AxB_GUSTAVSON: Gustavson's method for A*B
// GxB_AxB_HEAP: heap method for A*B
// GxB_AxB_HASH: hash method for A*B (FUTURE)
// The dot product method does not use this function.
// AxB_method_used reports the method actually chosen. This is for
// informational purposes only, so if a parallel C=A*B splits the work into
// multiple submatrix multiplications, and uses different methods on each
// submatrix, then AxB_method_used is the method chosen by thread zero.
// FUTURE:: hash-based method, and multi-phase Gustavson and Heap methods,
// which do not do any memory allocations in parallel, but instead use an
// inspector+executor style (like GB_AxB_dot*). This should work better on the
// GPU.
#include "GB_mxm.h"
#include "GB_Sauna.h"
GrB_Info GB_AxB_saxpy_parallel // parallel matrix-matrix multiply
(
GrB_Matrix *Chandle, // output matrix, NULL on input
GrB_Matrix M, // optional mask matrix
const bool Mask_comp, // if true, use !M
const GrB_Matrix A, // input matrix A
const GrB_Matrix B, // input matrix B
const GrB_Semiring semiring, // semiring that defines C=A*B
const bool flipxy, // if true, do z=fmult(b,a) vs fmult(a,b)
const GrB_Desc_Value AxB_method,// for auto vs user selection of methods
GrB_Desc_Value *AxB_method_used,// method selected by thread zero
bool *mask_applied, // if true, mask was applied
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Chandle != NULL) ; // C = (*Chandle) is NULL
ASSERT (*Chandle == NULL) ;
ASSERT_OK_OR_NULL (GB_check (M, "M for parallel A*B", GB0)) ;
ASSERT_OK (GB_check (A, "A for parallel A*B", GB0)) ;
ASSERT_OK (GB_check (B, "B for parallel A*B", GB0)) ;
ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
ASSERT_OK (GB_check (semiring, "semiring for parallel A*B", GB0)) ;
ASSERT (AxB_method_used != NULL) ;
GrB_Info info ;
//--------------------------------------------------------------------------
// get A and B
//--------------------------------------------------------------------------
if (B->nvec_nonempty < 0)
{
B->nvec_nonempty = GB_nvec_nonempty (B, NULL) ;
}
if (A->nvec_nonempty < 0)
{
A->nvec_nonempty = GB_nvec_nonempty (A, NULL) ;
}
int64_t anz = GB_NNZ (A) ;
int64_t bnvec = B->nvec ;
int64_t bnz = GB_NNZ (B) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
// nthreads may be reduced after the flopcount is computed.
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + bnz, chunk, nthreads_max) ;
//==========================================================================
// sequential C<M>=A*B
//==========================================================================
#define GB_FREE_ALL ;
if (nthreads == 1)
{
// select the method
int64_t bjnz_max ;
GB_AxB_select (A, B, semiring, AxB_method, AxB_method_used, &bjnz_max) ;
// acquire a Sauna if Gustavson's method is being used
int Sauna_id = -2 ;
if (*AxB_method_used == GxB_AxB_GUSTAVSON)
{
GB_OK (GB_Sauna_acquire (1, &Sauna_id, AxB_method_used, Context)) ;
}
// C<M>=A*B
GrB_Info info1 = GB_AxB_saxpy_sequential (Chandle, M, Mask_comp, A, B,
semiring, flipxy, *AxB_method_used, bjnz_max, true, mask_applied,
Sauna_id) ;
// release the Sauna for Gustavson's method
if (*AxB_method_used == GxB_AxB_GUSTAVSON)
{
// info is reset, so info1 is used above
GB_OK (GB_Sauna_release (1, &Sauna_id)) ;
}
return ((info1 == GrB_OUT_OF_MEMORY) ? GB_OUT_OF_MEMORY : info1) ;
}
//==========================================================================
// parallel C<M>=A*B
//==========================================================================
// The # of threads may be reduced, if the problem small, even to
// nthreads=1. But so far, for now, nthreads > 1.
ASSERT (nthreads > 1) ;
//--------------------------------------------------------------------------
// count the flops and determine # of threads to use
//--------------------------------------------------------------------------
int64_t total_flops ;
bool fine_slice = (nthreads > bnvec) ;
int64_t *restrict Bflops = NULL ;
int64_t *restrict Bflops_per_entry = NULL ;
if (!fine_slice)
{
//----------------------------------------------------------------------
// slice B by flops
//----------------------------------------------------------------------
// Slice B so that each slice has a balanced amount of flops, to
// compute its slice of C. Each thread gets enough columns of B so
// that it has roughly total_flops / nthreads work to do. Individual
// columns are not sliced, so the final step to compute C is a
// concatenation, not as summation. This should give a very good load
// balance where there are enough columns of B, but at the cost of a
// more expensive symbolic analysis, taking O(bnz) time. The analysis
// is itself fully parallel, however. This method cannot parallelize
// A*B when B is a single column (GrB_mxv or GrB_vxm).
// thread tid will do columns Slice [tid] to Slice [tid+1]-1
// note that Bflops is initialized to zero
GB_CALLOC_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
if (Bflops == NULL)
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
// Bflops [k] = # of flops to compute A*B(:,j) where j is the kth
// vector in B
GB_AxB_flopcount (Bflops, NULL, (Mask_comp) ? NULL : M, A, B, 0,
Context) ;
// reduce # of threads, based on flop count and the chunk size
total_flops = Bflops [bnvec] ;
}
else
{
//----------------------------------------------------------------------
// fine slice of B by flops (split columns of B)
//----------------------------------------------------------------------
// Slice B so that each slice has nearly exactly balanced amount of
// flops to compute its slice of C. Each thread gets exactly the
// number of entries so that it does total_flops/nthreads work (rounded
// to the nearest number of entries in B).
// note that Bflops_per_entry is initialized to zero
GB_CALLOC_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
if (Bflops_per_entry == NULL)
{
// out of memory
return (GB_OUT_OF_MEMORY) ;
}
// Bflops_per_entry [p] = # of flops to compute A(:,k)*B(k,j)
// where B(k,j) is in Bi [p] and Bx [p].
GB_AxB_flopcount (NULL, Bflops_per_entry, (Mask_comp) ? NULL : M,
A, B, 0, Context) ;
// reduce # of threads, based on flop count and the chunk size
total_flops = Bflops_per_entry [bnz] ;
}
//--------------------------------------------------------------------------
// find the size of each slice
//--------------------------------------------------------------------------
nthreads = GB_nthreads (total_flops, chunk, nthreads_max) ;
int64_t Slice [nthreads+1] ;
Slice [0] = 0 ;
if (!fine_slice)
{
// slice B by the flops needed for each vector
GB_pslice (Slice, Bflops, bnvec, nthreads) ;
GB_FREE_MEMORY (Bflops, bnvec+1, sizeof (int64_t)) ;
}
else
{
// slice B by the flops needed for each entry
GB_pslice (Slice, Bflops_per_entry, bnz, nthreads) ;
GB_FREE_MEMORY (Bflops_per_entry, bnz+1, sizeof (int64_t)) ;
}
//--------------------------------------------------------------------------
// discard the mask if it's too costly to use
//--------------------------------------------------------------------------
if (M != NULL && total_flops < GB_NNZ (M))
{
// The mask is too dense; discard it. mask_applied will be false.
M = NULL ;
}
//--------------------------------------------------------------------------
// construct each slice of B
//--------------------------------------------------------------------------
// If the problem is small enough so that nthreads has been reduced to 1,
// B is not sliced.
GrB_Matrix Cslice [nthreads] ;
GrB_Matrix Bslice [nthreads] ;
for (int tid = 0 ; tid < nthreads ; tid++)
{
Cslice [tid] = NULL ;
Bslice [tid] = NULL ;
}
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
for (int tid = 0 ; tid < nthreads ; tid++) \
{ \
GB_MATRIX_FREE (& (Cslice [tid])) ; \
GB_MATRIX_FREE (& (Bslice [tid])) ; \
} \
}
if (nthreads > 1)
{
if (fine_slice)
{
GB_OK (GB_fine_slice (B, nthreads, Slice, Bslice, Context)) ;
}
else
{
GB_OK (GB_slice (B, nthreads, Slice, Bslice, Context)) ;
}
}
//--------------------------------------------------------------------------
// select the method for each slice
//--------------------------------------------------------------------------
GrB_Desc_Value AxB_methods_used [nthreads] ;
int64_t bjnz_max [nthreads] ;
int Sauna_ids [nthreads] ;
bool any_Gustavson = false ;
#pragma omp parallel for num_threads(nthreads) schedule(static,1) \
reduction(||:any_Gustavson)
for (int tid = 0 ; tid < nthreads ; tid++)
{
GrB_Desc_Value thread_method_to_use ;
GB_AxB_select (A, (nthreads == 1) ? B : Bslice [tid], semiring,
AxB_method, &thread_method_to_use, &(bjnz_max [tid])) ;
AxB_methods_used [tid] = thread_method_to_use ;
// collect all thread-specific info
any_Gustavson = any_Gustavson ||
(thread_method_to_use == GxB_AxB_GUSTAVSON) ;
}
(*AxB_method_used) = AxB_methods_used [0] ;
//--------------------------------------------------------------------------
// acquire the Saunas for each thread that needs it
//--------------------------------------------------------------------------
if (any_Gustavson)
{
// at least one thread needs a Sauna
GB_OK (GB_Sauna_acquire (nthreads, Sauna_ids, AxB_methods_used,
Context)) ;
}
else
{
// no thread needs a Sauna
for (int tid = 0 ; tid < nthreads ; tid++)
{
Sauna_ids [tid] = -2 ;
}
}
//--------------------------------------------------------------------------
// compute each slice of C = A*B with optional mask M
//--------------------------------------------------------------------------
// This is the only parallel region in which each thread allocates memory.
// The memory space is not known until the thread determines the size of
// its own output, in its analysis phase. Note the "reduction(&&:ok)"
clause. This is the only place where a clause like that appears in
SuiteSparse:GraphBLAS. This could be removed if C=A*B were to be
computed with an inspector+executor style of algorithm.
// B has been "sliced"; in MATLAB notation, B = [B0 B1 B2 ... B(t-1)] if
// there are t threads. Then each k thread computes its own Ck = A*Bk,
// and the results are concatenated below, as C = [C0 C1 ... C(t-1)].
// If a 'fine slice' was used for B, then C = C0+C1+...+C(t-1) must be
// computed.
// for all threads in parallel, with no synchronization except for these
// boolean reductions:
bool ok = true ; // false if any thread's malloc or realloc fails
bool panic = false ; // true if any critical section fails
bool allmask = true ; // true if all threads apply the mask
#pragma omp parallel for num_threads(nthreads) schedule(static,1) \
reduction(&&:allmask) reduction(||:panic) \
reduction(&&:ok)
for (int tid = 0 ; tid < nthreads ; tid++)
{
// each thread allocates its output, using malloc and realloc
bool thread_mask_applied = false ;
GrB_Info thread_info = GB_AxB_saxpy_sequential (&(Cslice [tid]), M,
Mask_comp, A, (nthreads == 1) ? B : Bslice [tid], semiring,
flipxy, AxB_methods_used [tid], bjnz_max [tid],
false, &thread_mask_applied, Sauna_ids [tid]) ;
// collect all thread-specific info
ok = ok && (thread_info == GrB_SUCCESS) ;
allmask = allmask && (thread_mask_applied) ;
panic = panic || (thread_info == GrB_PANIC) ;
}
//--------------------------------------------------------------------------
// check error conditions
//--------------------------------------------------------------------------
// panic if a critical section fails
if (panic) return (GrB_PANIC) ;
// check the return info from all the threads
if (!ok)
{
// out of memory
if (any_Gustavson)
{
// at least one thread used a Sauna; free and release all Sauna
// workspaces
for (int tid = 0 ; tid < nthreads ; tid++)
{
int Sauna_id = Sauna_ids [tid] ;
if (Sauna_id >= 0)
{
GB_Sauna_free (Sauna_id) ;
}
}
GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
}
GB_FREE_ALL ;
return (GB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// release the Saunas
//--------------------------------------------------------------------------
if (any_Gustavson)
{
// at least one thread used a Sauna
GB_OK (GB_Sauna_release (nthreads, Sauna_ids)) ;
}
//--------------------------------------------------------------------------
// check if all threads applied the mask
//--------------------------------------------------------------------------
// if all threads applied the mask to their slices, then GB_accum_mask does
// not need to apply it to the concatenated C in GB_AxB_meta. If just some
// of them did, then GB_accum_mask needs to apply the mask again.
(*mask_applied) = allmask ;
//--------------------------------------------------------------------------
// concatenate or sum the slices of C
//--------------------------------------------------------------------------
// Each slice Cslice [tid] has the same dimensions and type as C. C is
// stored by column.
if (nthreads == 1)
{
// one thread, so only one slice: just copy Cslice[0] to C
(*Chandle) = Cslice [0] ;
Cslice [0] = NULL ;
}
else if (fine_slice)
{
// C = sum (Cslice [0..nthreads-1]). Adjacent slices of C can share
// columns, which must be summed. Columns in the middle of each slice
// are concatenated horizontally.
GB_OK (GB_hcat_fine_slice (Chandle, nthreads, Cslice, semiring->add,
Sauna_ids, Context)) ;
}
else
{
// C = [Cslice(0) Cslice(1) ... Cslice(nthreads-1)] concatenated
// horizontally. Each slice contains entries that appear in a unique
// and contiguous subset of the columns of C.
GB_OK (GB_hcat_slice (Chandle, nthreads, Cslice, Context)) ;
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_ALL ;
ASSERT_OK (GB_check (*Chandle, "C for parallel A*B", GB0)) ;
return (GrB_SUCCESS) ;
}
|
density.h | // Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file density.h
*
* \brief Contains definition and partial implementation of sirius::Density class.
*/
#ifndef __DENSITY_H__
#define __DENSITY_H__
#include "periodic_function.h"
#include "k_point_set.h"
#include "simulation_context.h"
#include "mixer.h"
#ifdef __GPU
extern "C" void generate_dm_pw_gpu(int num_atoms__,
int num_gvec_loc__,
int num_beta__,
double const* atom_pos__,
int const* gvec__,
double* phase_factors__,
double const* dm__,
double* dm_pw__,
int stream_id__);
extern "C" void sum_q_pw_dm_pw_gpu(int num_gvec_loc__,
int nbf__,
double const* q_pw__,
double const* dm_pw__,
double const* sym_weight__,
double_complex* rho_pw__,
int stream_id__);
extern "C" void update_density_rg_1_gpu(int size__,
double_complex const* psi_rg__,
double wt__,
double* density_rg__);
extern "C" void update_density_rg_2_gpu(int size__,
double_complex const* psi_rg_up__,
double_complex const* psi_rg_dn__,
double wt__,
double* density_x_rg__,
double* density_y_rg__);
#endif
namespace sirius {
/// Generate charge density and magnetization from occupied spinor wave-functions.
/** Let's start from the definition of the complex density matrix:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) =
* \sum_{j{\bf k}} n_{j{\bf k}} \Psi_{j{\bf k}}^{\sigma*}({\bf r}) \Psi_{j{\bf k}}^{\sigma'}({\bf r}) =
* \frac{1}{2} \left( \begin{array}{cc} \rho({\bf r})+m_z({\bf r}) &
* m_x({\bf r})-im_y({\bf r}) \\ m_x({\bf r})+im_y({\bf r}) & \rho({\bf r})-m_z({\bf r}) \end{array} \right)
* \f]
* We notice that the diagonal components of the density matrix are actually real and the off-diagonal components are
* expressed through two independent functions \f$ m_x({\bf r}) \f$ and \f$ m_y({\bf r}) \f$. Having this in mind we
* will work with a slightly different object, namely a real density matrix, defined as a 1-, 2- or 4-dimensional
* (depending on the number of magnetic components) vector with the following elements:
* - \f$ [ \rho({\bf r}) ] \f$ in case of non-magnetic configuration
* - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}) ] =
* [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2} ] \f$ in case of collinear
* magnetic configuration
* - \f$ [ \rho_{\uparrow \uparrow}({\bf r}), \rho_{\downarrow \downarrow}({\bf r}),
* 2 \Re \rho_{\uparrow \downarrow}({\bf r}), -2 \Im \rho_{\uparrow \downarrow}({\bf r}) ] =
* [ \frac{\rho({\bf r})+m_z({\bf r})}{2}, \frac{\rho({\bf r})-m_z({\bf r})}{2},
* m_x({\bf r}), m_y({\bf r}) ] \f$ in the general case of non-collinear magnetic configuration
*
* At this point it is straightforward to compute the density and magnetization in the interstitial (see add_k_point_contribution_rg()).
* The muffin-tin part of the density and magnetization is obtained in a slightly more complicated way. Recall the
* expansion of spinor wave-functions inside the muffin-tin \f$ \alpha \f$
* \f[
* \Psi_{j{\bf k}}^{\sigma}({\bf r}) = \sum_{\xi}^{N_{\xi}^{\alpha}} {S_{\xi}^{\sigma j {\bf k},\alpha}}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}(\hat {\bf r})
* \f]
* which we insert into expression for the complex density matrix:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) = \sum_{j{\bf k}} n_{j{\bf k}} \sum_{\xi}^{N_{\xi}^{\alpha}}
* S_{\xi}^{\sigma j {\bf k},\alpha*} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)
* Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r}) \sum_{\xi'}^{N_{\xi'}^{\alpha}} S_{\xi'}^{\sigma' j{\bf k},\alpha}
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r})
* \f]
* First, we eliminate a sum over bands and k-points by forming an auxiliary density tensor:
* \f[
* D_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}} S_{\xi}^{\sigma j {\bf k},\alpha*}
* S_{\xi'}^{\sigma' j {\bf k},\alpha}
* \f]
* The expression for complex density matrix simplifies to:
* \f[
* \rho_{\sigma' \sigma}({\bf r}) = \sum_{\xi \xi'} D_{\xi \sigma, \xi' \sigma'}^{\alpha}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r})
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r})
* \f]
* Now we can switch to the real density matrix and write its expansion in real spherical harmonics. Let's take
* non-magnetic case as an example:
* \f[
* \rho({\bf r}) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha}
* f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)Y_{\ell_{\xi}m_{\xi}}^{*}(\hat {\bf r})
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r)Y_{\ell_{\xi'}m_{\xi'}}(\hat {\bf r}) =
* \sum_{\ell_3 m_3} \rho_{\ell_3 m_3}^{\alpha}(r) R_{\ell_3 m_3}(\hat {\bf r})
* \f]
* where
* \f[
* \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\xi \xi'} D_{\xi \xi'}^{\alpha} f_{\ell_{\xi} \lambda_{\xi}}^{\alpha}(r)
* f_{\ell_{\xi'} \lambda_{\xi'}}^{\alpha}(r) \langle Y_{\ell_{\xi}m_{\xi}} | R_{\ell_3 m_3} | Y_{\ell_{\xi'}m_{\xi'}} \rangle
* \f]
* We are almost done. Now it is time to switch to the full index notation \f$ \xi \rightarrow \{ \ell \lambda m \} \f$
* and sum over \a m and \a m' indices:
* \f[
* \rho_{\ell_3 m_3}^{\alpha}(r) = \sum_{\ell \lambda, \ell' \lambda'} f_{\ell \lambda}^{\alpha}(r)
* f_{\ell' \lambda'}^{\alpha}(r) d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha}
* \f]
* where
* \f[
* d_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} =
* \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha}
* \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle
* \f]
* This is our final answer: radial components of density and magnetization are expressed as a linear combination of
* quadratic forms in radial functions.
*
* \note density and potential are allocated as global function because it's easier to load and save them. */
class Density
{
private:
/// Context of the simulation.
Simulation_context& ctx_;
/// Alias to ctx_.unit_cell()
Unit_cell& unit_cell_;
/// Density matrix for all atoms.
mdarray<double_complex, 4> density_matrix_; // TODO: make it local for LAPW
struct paw_density_data_t
{
Atom *atom_{nullptr};
int ia{-1};
/// ae and ps local unified densities+magnetization
std::vector<Spheric_function<spectral, double>> ae_density_;
std::vector<Spheric_function<spectral, double>> ps_density_;
};
std::vector<paw_density_data_t> paw_density_data_;
/// Pointer to charge density.
/** In the case of full-potential calculation this is the full (valence + core) electron charge density.
* In the case of pseudopotential this is the valence charge density. */
std::unique_ptr<Periodic_function<double>> rho_{nullptr};
/// Magnetization.
std::array<std::unique_ptr<Periodic_function<double>>, 3> magnetization_;
/// Alias for density and magnetization.
std::array<Periodic_function<double>*, 4> rho_vec_{{nullptr, nullptr, nullptr, nullptr}};
/// Density and magnetization on the coarse FFT mesh.
/** Coarse FFT grid is enough to generate density and magnetization from the wave-functions. The components
* of the <tt>rho_mag_coarse</tt> vector have the following order:
* \f$ \{\rho({\bf r}), m_z({\bf r}), m_x({\bf r}), m_y({\bf r}) \} \f$. */
std::array<std::unique_ptr<Smooth_periodic_function<double>>, 4> rho_mag_coarse_;
/// Pointer to pseudo core charge density
/** In the case of pseudopotential we need to know the non-linear core correction to the
* exchange-correlation energy which is introduced through the pseudo core density:
* \f$ E_{xc}[\rho_{val} + \rho_{core}] \f$. The 'pseudo' reflects the fact that
* the integral of this density does not reproduce the total number of core electrons. */
std::unique_ptr<Smooth_periodic_function<double>> rho_pseudo_core_{nullptr};
/// Non-zero Gaunt coefficients.
std::unique_ptr<Gaunt_coefficients<double_complex>> gaunt_coefs_{nullptr};
/// Fast mapping between composite lm index and corresponding orbital quantum number.
mdarray<int, 1> l_by_lm_;
/// High-frequency mixer for the pseudopotential density mixing.
std::unique_ptr<Mixer<double_complex>> hf_mixer_{nullptr};
/// Low-frequency mixer for the pseudopotential density mixing.
std::unique_ptr<Mixer<double_complex>> lf_mixer_{nullptr};
/// Mixer for the full-potential density mixing.
std::unique_ptr<Mixer<double>> mixer_{nullptr};
/// List of local low-frequency G-vectors.
std::vector<int> lf_gvec_;
/// List of local high-frequency G-vectors.
std::vector<int> hf_gvec_;
/// Weights of local low-frequency G-vectors.
std::vector<double> lf_gvec_weights_;
/// Allocate PAW data.
void init_paw();
void generate_paw_atom_density(paw_density_data_t &pdd);
/// Initialize \rho_{ij} - density matrix, occupation on basis of beta-projectors (used for PAW).
void init_density_matrix_for_paw();
/// Reduce complex density matrix over magnetic quantum numbers
/** The following operation is performed:
* \f[
* n_{\ell \lambda, \ell' \lambda', \ell_3 m_3}^{\alpha} =
* \sum_{mm'} D_{\ell \lambda m, \ell' \lambda' m'}^{\alpha}
* \langle Y_{\ell m} | R_{\ell_3 m_3} | Y_{\ell' m'} \rangle
* \f]
*/
template <int num_mag_dims>
void reduce_density_matrix(Atom_type const& atom_type__,
                           int ia__,
                           mdarray<double_complex, 4> const& zdens__,
                           Gaunt_coefficients<double_complex> const& gaunt_coeffs__,
                           mdarray<double, 3>& mt_density_matrix__)
{
    /* output is accumulated, so start from zero */
    mt_density_matrix__.zero();
    /* loop over pairs of radial functions; outer loop is parallel and writes to a
       distinct "offs" slice per idxrf2, so iterations do not race */
    #pragma omp parallel for default(shared)
    for (int idxrf2 = 0; idxrf2 < atom_type__.mt_radial_basis_size(); idxrf2++) {
        int l2 = atom_type__.indexr(idxrf2).l;
        /* only the lower triangle idxrf1 <= idxrf2 is stored */
        for (int idxrf1 = 0; idxrf1 <= idxrf2; idxrf1++) {
            /* packed triangular index of the (idxrf1, idxrf2) radial-function pair */
            int offs = idxrf2 * (idxrf2 + 1) / 2 + idxrf1;
            int l1 = atom_type__.indexr(idxrf1).l;
            /* xi2/xi1: position of the first (l, m = -l) basis function of each radial
               function in the composite muffin-tin basis; incremented together with lm */
            int xi2 = atom_type__.indexb().index_by_idxrf(idxrf2);
            for (int lm2 = Utils::lm_by_l_m(l2, -l2); lm2 <= Utils::lm_by_l_m(l2, l2); lm2++, xi2++) {
                int xi1 = atom_type__.indexb().index_by_idxrf(idxrf1);
                for (int lm1 = Utils::lm_by_l_m(l1, -l1); lm1 <= Utils::lm_by_l_m(l1, l1); lm1++, xi1++) {
                    /* sum over the non-zero Gaunt coefficients <Y_lm1|R_lm3|Y_lm2> */
                    for (int k = 0; k < gaunt_coeffs__.num_gaunt(lm1, lm2); k++) {
                        int lm3 = gaunt_coeffs__.gaunt(lm1, lm2, k).lm3;
                        auto gc = gaunt_coeffs__.gaunt(lm1, lm2, k).coef;
                        /* intentional fallthrough: num_mag_dims == 3 also accumulates the
                           components of cases 1 and 0; case 1 also accumulates case 0 */
                        switch (num_mag_dims) {
                            case 3: {
                                /* m_x and m_y from the off-diagonal (spin-up/down) block */
                                mt_density_matrix__(lm3, offs, 2) += 2.0 * std::real(zdens__(xi1, xi2, 2, ia__) * gc);
                                mt_density_matrix__(lm3, offs, 3) -= 2.0 * std::imag(zdens__(xi1, xi2, 2, ia__) * gc);
                            }
                            /* fallthrough */
                            case 1: {
                                /* spin-down diagonal component */
                                mt_density_matrix__(lm3, offs, 1) += std::real(zdens__(xi1, xi2, 1, ia__) * gc);
                            }
                            /* fallthrough */
                            case 0: {
                                /* charge density (or spin-up diagonal component) */
                                mt_density_matrix__(lm3, offs, 0) += std::real(zdens__(xi1, xi2, 0, ia__) * gc);
                            }
                        }
                    }
                }
            }
        }
    }
}
/// Add k-point contribution to the density matrix in the canonical form.
/** In case of full-potential LAPW complex density matrix has the following expression:
* \f[
* d_{\xi \sigma, \xi' \sigma'}^{\alpha} = \sum_{j{\bf k}} n_{j{\bf k}}
* S_{\xi}^{\sigma j {\bf k},\alpha*} S_{\xi'}^{\sigma' j {\bf k},\alpha}
* \f]
*
* where \f$ S_{\xi}^{\sigma j {\bf k},\alpha} \f$ are the expansion coefficients of
* spinor wave functions inside muffin-tin spheres.
*
* In case of LDA+U the occupation matrix is also computed. It has the following expression:
* \f[
* n_{\ell,mm'}^{\sigma \sigma'} = \sum_{i {\bf k}}^{occ} \int_{0}^{R_{MT}} r^2 dr
* \Psi_{\ell m}^{i{\bf k}\sigma *}({\bf r}) \Psi_{\ell m'}^{i{\bf k}\sigma'}({\bf r})
* \f]
*
* In case of ultrasoft pseudopotential the following density matrix has to be computed for each atom:
* \f[
* d_{\xi \xi'}^{\alpha} = \langle \beta_{\xi}^{\alpha} | \hat N | \beta_{\xi'}^{\alpha} \rangle =
* \sum_{j {\bf k}} \langle \beta_{\xi}^{\alpha} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}}
* \langle \Psi_{j{\bf k}} | \beta_{\xi'}^{\alpha} \rangle
* \f]
* Here \f$ \hat N = \sum_{j{\bf k}} | \Psi_{j{\bf k}} \rangle n_{j{\bf k}} \langle \Psi_{j{\bf k}} | \f$ is
* the occupancy operator written in spectral representation. */
template <typename T>
inline void add_k_point_contribution_dm(K_point* kp__,
mdarray<double_complex, 4>& density_matrix__);
/// Add k-point contribution to the density and magnetization defined on the regular FFT grid.
inline void add_k_point_contribution_rg(K_point* kp__);
/// Generate valence density in the muffin-tins
void generate_valence_mt(K_point_set& ks);
/// Generate charge density of core states
void generate_core_charge_density()
{
    PROFILE("sirius::Density::generate_core_charge_density");
    /* step 1: each MPI rank solves the core states for its local slice of symmetry classes */
    int num_local = unit_cell_.spl_num_atom_symmetry_classes().local_size();
    for (int j = 0; j < num_local; j++) {
        int class_id = unit_cell_.spl_num_atom_symmetry_classes(j);
        unit_cell_.atom_symmetry_class(class_id).generate_core_charge_density(ctx_.core_relativity());
    }
    /* step 2: make every class' core density consistent by syncing from the owning rank */
    int num_classes = unit_cell_.num_atom_symmetry_classes();
    for (int class_id = 0; class_id < num_classes; class_id++) {
        int owner_rank = unit_cell_.spl_num_atom_symmetry_classes().local_rank(class_id);
        unit_cell_.atom_symmetry_class(class_id).sync_core_charge_density(ctx_.comm(), owner_rank);
    }
}
void generate_pseudo_core_charge_density()
{
PROFILE("sirius::Density::generate_pseudo_core_charge_density");
auto ri = Radial_integrals_rho_core_pseudo<false>(ctx_.unit_cell(), ctx_.pw_cutoff(), ctx_.settings().nprii_rho_core_);
auto v = ctx_.make_periodic_function<index_domain_t::local>([&ri](int iat, double g)
{
return ri.value<int>(iat, g);
});
std::copy(v.begin(), v.end(), &rho_pseudo_core_->f_pw_local(0));
rho_pseudo_core_->fft_transform(1);
}
public:
/// Constructor
/** Allocates the charge density, magnetization, coarse-grid components and (for the
 *  pseudopotential case) the pseudo core density, then splits the local G-vectors into
 *  low- and high-frequency sets. */
Density(Simulation_context& ctx__)
    : ctx_(ctx__)
    , unit_cell_(ctx_.unit_cell())
{
    /* allocate charge density */
    rho_ = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_rho()));
    rho_vec_[0] = rho_.get();
    /* allocate magnetization density; rho_vec_ aliases [rho, m_0, m_1, m_2] */
    for (int i = 0; i < ctx_.num_mag_dims(); i++) {
        magnetization_[i] = std::unique_ptr<Periodic_function<double>>(new Periodic_function<double>(ctx_, ctx_.lmmax_rho()));
        rho_vec_[i + 1] = magnetization_[i].get();
    }
    /* allocate charge density and magnetization on a coarse grid */
    for (int i = 0; i < ctx_.num_mag_dims() + 1; i++) {
        rho_mag_coarse_[i] = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft_coarse(), ctx_.gvec_coarse_partition()));
    }
    /* core density of the pseudopotential method */
    if (!ctx_.full_potential()) {
        rho_pseudo_core_ = std::unique_ptr<Smooth_periodic_function<double>>(new Smooth_periodic_function<double>(ctx_.fft(), ctx_.gvec_partition()));
        rho_pseudo_core_->zero();
        /* generate the pseudo core density only if at least one atom type provides one */
        bool is_empty{true};
        for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
            is_empty &= unit_cell_.atom_type(iat).ps_core_charge_density().empty();
        }
        if (!is_empty) {
            generate_pseudo_core_charge_density();
        }
    }
    /* Gaunt coefficients are only needed for the full-potential muffin-tin expansion */
    if (ctx_.full_potential()) {
        using gc_z = Gaunt_coefficients<double_complex>;
        gaunt_coefs_ = std::unique_ptr<gc_z>(new gc_z(ctx_.lmax_apw(), ctx_.lmax_rho(), ctx_.lmax_apw(), SHT::gaunt_hybrid));
    }
    l_by_lm_ = Utils::l_by_lm(ctx_.lmax_rho());
    density_matrix_ = mdarray<double_complex, 4>(unit_cell_.max_mt_basis_size(), unit_cell_.max_mt_basis_size(),
                                                 ctx_.num_mag_comp(), unit_cell_.num_atoms());
    density_matrix_.zero();
    /* split local G-vectors to low-frequency (|G| <= 2*gk_cutoff) and high-frequency */
    for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) {
        int ig = ctx_.gvec().offset() + igloc;
        auto gv = ctx_.gvec().gvec_cart(ig);
        if (gv.length() <= 2 * ctx_.gk_cutoff()) {
            lf_gvec_.push_back(igloc);
            /* 4*pi*Omega/G^2 weight (Kerker-style; presumably used by the LF mixer);
               the G = 0 vector gets zero weight */
            if (ig) {
                lf_gvec_weights_.push_back(fourpi * unit_cell_.omega() / std::pow(gv.length(), 2));
            } else {
                lf_gvec_weights_.push_back(0);
            }
        } else {
            hf_gvec_.push_back(igloc);
        }
    }
}
/// Set pointers to muffin-tin and interstitial charge density arrays
void set_charge_density_ptr(double* rhomt, double* rhorg)
{
    /* muffin-tin pointer is meaningful only in the full-potential case */
    bool use_mt = ctx_.full_potential() && (rhomt != nullptr);
    if (use_mt) {
        rho_->set_mt_ptr(rhomt);
    }
    if (rhorg != nullptr) {
        rho_->set_rg_ptr(rhorg);
    }
}
/// Set pointers to muffin-tin and interstitial magnetization arrays
void set_magnetization_ptr(double* magmt, double* magir)
{
    /* nothing to alias in a non-magnetic calculation */
    if (ctx_.num_mag_dims() == 0) {
        return;
    }
    assert(ctx_.num_spins() == 2);
    // set temporary array wrapper; external layout is
    // (lmmax, max_num_mt_points, num_atoms, num_mag_dims)
    mdarray<double, 4> magmt_tmp(magmt, ctx_.lmmax_rho(), unit_cell_.max_num_mt_points(),
                                 unit_cell_.num_atoms(), ctx_.num_mag_dims());
    mdarray<double, 2> magir_tmp(magir, ctx_.fft().size(), ctx_.num_mag_dims());
    if (ctx_.num_mag_dims() == 1) {
        /* collinear case: z component is the first and only one */
        if (magmt) {
            magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0));
        }
        if (magir) {
            magnetization_[0]->set_rg_ptr(&magir_tmp(0, 0));
        }
    }
    /* non-collinear case: external arrays are ordered (x, y, z) while the internal
       magnetization_ array is ordered (z, x, y) — hence the index permutation below */
    if (ctx_.num_mag_dims() == 3) {
        if (magmt) {
            /* z component is the first */
            magnetization_[0]->set_mt_ptr(&magmt_tmp(0, 0, 0, 2));
            /* x component is the second */
            magnetization_[1]->set_mt_ptr(&magmt_tmp(0, 0, 0, 0));
            /* y component is the third */
            magnetization_[2]->set_mt_ptr(&magmt_tmp(0, 0, 0, 1));
        }
        if (magir) {
            /* z component is the first */
            magnetization_[0]->set_rg_ptr(&magir_tmp(0, 2));
            /* x component is the second */
            magnetization_[1]->set_rg_ptr(&magir_tmp(0, 0));
            /* y component is the third */
            magnetization_[2]->set_rg_ptr(&magir_tmp(0, 1));
        }
    }
}
/// Zero density and magnetization
void zero()
{
    rho_->zero();
    int num_mag = ctx_.num_mag_dims();
    for (int j = 0; j < num_mag; j++) {
        magnetization_[j]->zero();
    }
}
/// Find the total leakage of the core states out of the muffin-tins
double core_leakage()
{
    /* weight each symmetry class' leakage by the number of atoms it represents */
    double total = 0.0;
    int num_classes = unit_cell_.num_atom_symmetry_classes();
    for (int ic = 0; ic < num_classes; ic++) {
        total += core_leakage(ic) * unit_cell_.atom_symmetry_class(ic).num_atoms();
    }
    return total;
}
/// Return core leakage for a specific atom symmetry class
double core_leakage(int ic)
{
    /* delegate to the symmetry class that owns the core states */
    auto& symmetry_class = unit_cell_.atom_symmetry_class(ic);
    return symmetry_class.core_leakage();
}
/// Generate initial charge density and magnetization
void initial_density();
void initial_density_pseudo();
void initial_density_full_pot();
/// Check total density for the correct number of electrons.
inline void check_num_electrons()
{
    double nel{0};
    if (ctx_.full_potential()) {
        /* FP case: integrate the MT and interstitial parts explicitly */
        std::vector<double> nel_mt;
        double nel_it;
        nel = rho_->integrate(nel_mt, nel_it);
    } else {
        /* PP case: N = rho(G=0) * Omega */
        nel = rho_->f_0().real() * unit_cell_.omega();
    }
    /* check the number of electrons; warn only on rank 0 to avoid duplicate output */
    if (std::abs(nel - unit_cell_.num_electrons()) > 1e-5 && ctx_.comm().rank() == 0) {
        std::stringstream s;
        s << "wrong number of electrons" << std::endl
          << " obtained value : " << nel << std::endl
          << " target value : " << unit_cell_.num_electrons() << std::endl
          << " difference : " << std::abs(nel - unit_cell_.num_electrons()) << std::endl;
        if (ctx_.full_potential()) {
            /* in FP the usual culprit is core charge leaking out of the muffin-tins */
            s << " total core leakage : " << core_leakage();
            for (int ic = 0; ic < unit_cell_.num_atom_symmetry_classes(); ic++) {
                s << std::endl << " atom class : " << ic << ", core leakage : " << core_leakage(ic);
            }
        }
        WARNING(s);
    }
}
/// Generate full charge density (valence + core) and magnetization from the wave functions.
/** This function calls generate_valence() and then in case of full-potential LAPW method adds a core density
* to get the full charge density of the system. */
inline void generate(K_point_set& ks__)
{
    PROFILE("sirius::Density::generate");
    generate_valence(ks__);
    if (ctx_.full_potential()) {
        /* find the core states */
        generate_core_charge_density();
        /* add core contribution to the lm = 0 radial component of the local atoms;
           division by y00 converts the spherically symmetric core density to the
           lm = 0 expansion coefficient */
        for (int ialoc = 0; ialoc < (int)unit_cell_.spl_num_atoms().local_size(); ialoc++) {
            int ia = unit_cell_.spl_num_atoms(ialoc);
            for (int ir = 0; ir < unit_cell_.atom(ia).num_mt_points(); ir++) {
                rho_->f_mt<index_domain_t::local>(0, ir, ialoc) +=
                    unit_cell_.atom(ia).symmetry_class().ae_core_charge_density(ir) / y00;
            }
        }
        /* synchronize muffin-tin part (each rank only updated its local atoms above) */
        rho_->sync_mt();
        for (int j = 0; j < ctx_.num_mag_dims(); j++) {
            magnetization_[j]->sync_mt();
        }
    }
}
/// Generate valence charge density and magnetization from the wave functions.
/** The interstitial density is generated on the coarse FFT grid and then transformed to the PW domain.
* After symmetrization and mixing, and before the generation of the XC potential, the density is transformed to the
* real-space domain and checked for the number of electrons. */
inline void generate_valence(K_point_set& ks__);
/// Add augmentation charge Q(r).
/** Restore valence density by adding the Q-operator contribution.
* The following term is added to the valence density, generated by the pseudo wave-functions:
* \f[
* \tilde \rho({\bf G}) = \sum_{\alpha} \sum_{\xi \xi'} d_{\xi \xi'}^{\alpha} Q_{\xi' \xi}^{\alpha}({\bf G})
* \f]
* Plane-wave coefficients of the Q-operator for a given atom \f$ \alpha \f$ can be obtained from the
* corresponding coefficients of the Q-operator for a given atom \a type A:
* \f[
* Q_{\xi' \xi}^{\alpha(A)}({\bf G}) = e^{-i{\bf G}\tau_{\alpha(A)}} Q_{\xi' \xi}^{A}({\bf G})
* \f]
* We use this property to split the sum over atoms into sum over atom types and inner sum over atoms of the
* same type:
* \f[
* \tilde \rho({\bf G}) = \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) \sum_{\alpha(A)}
* d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}} =
* \sum_{A} \sum_{\xi \xi'} Q_{\xi' \xi}^{A}({\bf G}) d_{\xi \xi'}^{A}({\bf G})
* \f]
* where
* \f[
* d_{\xi \xi'}^{A}({\bf G}) = \sum_{\alpha(A)} d_{\xi \xi'}^{\alpha(A)} e^{-i{\bf G}\tau_{\alpha(A)}}
* \f]
*/
void augment(K_point_set& ks__)
{
    PROFILE("sirius::Density::augment");
    /* skip entirely when no atom type carries an augmentation charge */
    bool has_augmentation{false};
    int num_types = unit_cell_.num_atom_types();
    for (int iat = 0; iat < num_types; iat++) {
        if (unit_cell_.atom_type(iat).augment()) {
            has_augmentation = true;
        }
    }
    if (!has_augmentation) {
        return;
    }
    /* Q(G) contribution for the density and each magnetization component */
    mdarray<double_complex, 2> rho_aug(ctx_.gvec().count(), ctx_.num_mag_dims() + 1, ctx_.dual_memory_t());
    switch (ctx_.processing_unit()) {
        case CPU: {
            generate_rho_aug<CPU>(rho_aug);
            break;
        }
        case GPU: {
            generate_rho_aug<GPU>(rho_aug);
            break;
        }
    }
    /* accumulate the augmentation charge into the local plane-wave coefficients */
    int num_components = ctx_.num_mag_dims() + 1;
    for (int iv = 0; iv < num_components; iv++) {
        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < ctx_.gvec().count(); igloc++) {
            rho_vec_[iv]->f_pw_local(igloc) += rho_aug(igloc, iv);
        }
    }
}
template <device_t pu>
inline void generate_rho_aug(mdarray<double_complex, 2>& rho_aug__);
/// Check density at MT boundary
void check_density_continuity_at_mt();
void save()
{
rho_->hdf5_write(storage_file_name, "density");
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
std::stringstream s;
s << "magnetization/" << j;
magnetization_[j]->hdf5_write(storage_file_name, s.str());
}
ctx_.comm().barrier();
}
void load()
{
HDF5_tree fin(storage_file_name, hdf5_access_t::read_only);
int ngv;
fin.read("/parameters/num_gvec", &ngv, 1);
if (ngv != ctx_.gvec().num_gvec()) {
TERMINATE("wrong number of G-vectors");
}
mdarray<int, 2> gv(3, ngv);
fin.read("/parameters/gvec", gv);
rho_->hdf5_read(fin["density"], gv);
rho_->fft_transform(1);
for (int j = 0; j < ctx_.num_mag_dims(); j++) {
magnetization_[j]->hdf5_read(fin["magnetization"][j], gv);
magnetization_[j]->fft_transform(1);
}
}
/// Export the unit cell to XSF format (currently disabled: body is commented-out legacy code).
void save_to_xsf()
{
//== FILE* fout = fopen("unit_cell.xsf", "w");
//== fprintf(fout, "CRYSTAL\n");
//== fprintf(fout, "PRIMVEC\n");
//== auto& lv = unit_cell_.lattice_vectors();
//== for (int i = 0; i < 3; i++)
//== {
//== fprintf(fout, "%18.12f %18.12f %18.12f\n", lv(0, i), lv(1, i), lv(2, i));
//== }
//== fprintf(fout, "CONVVEC\n");
//== for (int i = 0; i < 3; i++)
//== {
//== fprintf(fout, "%18.12f %18.12f %18.12f\n", lv(0, i), lv(1, i), lv(2, i));
//== }
//== fprintf(fout, "PRIMCOORD\n");
//== fprintf(fout, "%i 1\n", unit_cell_.num_atoms());
//== for (int ia = 0; ia < unit_cell_.num_atoms(); ia++)
//== {
//== auto pos = unit_cell_.get_cartesian_coordinates(unit_cell_.atom(ia).position());
//== fprintf(fout, "%i %18.12f %18.12f %18.12f\n", unit_cell_.atom(ia).zn(), pos[0], pos[1], pos[2]);
//== }
//== fclose(fout);
}
/// Export the potential/density to TED format (currently disabled: body is commented-out legacy code).
void save_to_ted()
{
//== void write_periodic_function()
//== {
//== //== mdarray<double, 3> vloc_3d_map(&vloc_it[0], fft_->size(0), fft_->size(1), fft_->size(2));
//== //== int nx = fft_->size(0);
//== //== int ny = fft_->size(1);
//== //== int nz = fft_->size(2);
//== //== auto p = parameters_.unit_cell()->unit_cell_parameters();
//== //== FILE* fout = fopen("potential.ted", "w");
//== //== fprintf(fout, "%s\n", parameters_.unit_cell()->chemical_formula().c_str());
//== //== fprintf(fout, "%16.10f %16.10f %16.10f %16.10f %16.10f %16.10f\n", p.a, p.b, p.c, p.alpha, p.beta, p.gamma);
//== //== fprintf(fout, "%i %i %i\n", nx + 1, ny + 1, nz + 1);
//== //== for (int i0 = 0; i0 <= nx; i0++)
//== //== {
//== //== for (int i1 = 0; i1 <= ny; i1++)
//== //== {
//== //== for (int i2 = 0; i2 <= nz; i2++)
//== //== {
//== //== fprintf(fout, "%14.8f\n", vloc_3d_map(i0 % nx, i1 % ny, i2 % nz));
//== //== }
//== //== }
//== //== }
//== //== fclose(fout);
//== }
}
/// Export density/magnetization to XDMF+HDF5 (currently disabled: body is commented-out legacy code).
void save_to_xdmf()
{
//== mdarray<double, 3> rho_grid(&rho_->f_it<global>(0), fft_->size(0), fft_->size(1), fft_->size(2));
//== mdarray<double, 4> pos_grid(3, fft_->size(0), fft_->size(1), fft_->size(2));
//== mdarray<double, 4> mag_grid(3, fft_->size(0), fft_->size(1), fft_->size(2));
//== mag_grid.zero();
//== // loop over 3D array (real space)
//== for (int j0 = 0; j0 < fft_->size(0); j0++)
//== {
//== for (int j1 = 0; j1 < fft_->size(1); j1++)
//== {
//== for (int j2 = 0; j2 < fft_->size(2); j2++)
//== {
//== int ir = static_cast<int>(j0 + j1 * fft_->size(0) + j2 * fft_->size(0) * fft_->size(1));
//== // get real space fractional coordinate
//== double frv[] = {double(j0) / fft_->size(0),
//== double(j1) / fft_->size(1),
//== double(j2) / fft_->size(2)};
//== vector3d<double> rv = ctx_.unit_cell()->get_cartesian_coordinates(vector3d<double>(frv));
//== for (int x = 0; x < 3; x++) pos_grid(x, j0, j1, j2) = rv[x];
//== if (ctx_.num_mag_dims() == 1) mag_grid(2, j0, j1, j2) = magnetization_[0]->f_it<global>(ir);
//== if (ctx_.num_mag_dims() == 3)
//== {
//== mag_grid(0, j0, j1, j2) = magnetization_[1]->f_it<global>(ir);
//== mag_grid(1, j0, j1, j2) = magnetization_[2]->f_it<global>(ir);
//== }
//== }
//== }
//== }
//== HDF5_tree h5_rho("rho.hdf5", true);
//== h5_rho.write("rho", rho_grid);
//== h5_rho.write("pos", pos_grid);
//== h5_rho.write("mag", mag_grid);
//== FILE* fout = fopen("rho.xdmf", "w");
//== //== fprintf(fout, "<?xml version=\"1.0\" ?>\n"
//== //== "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n"
//== //== "<Xdmf>\n"
//== //== " <Domain Name=\"name1\">\n"
//== //== " <Grid Name=\"fft_fine_grid\" Collection=\"Unknown\">\n"
//== //== " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\" %i %i %i \"/>\n"
//== //== " <Geometry GeometryType=\"XYZ\">\n"
//== //== " <DataItem Dimensions=\"%i %i %i 3\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">rho.hdf5:/pos</DataItem>\n"
//== //== " </Geometry>\n"
//== //== " <Attribute\n"
//== //== " AttributeType=\"Scalar\"\n"
//== //== " Center=\"Node\"\n"
//== //== " Name=\"rho\">\n"
//== //== " <DataItem\n"
//== //== " NumberType=\"Float\"\n"
//== //== " Precision=\"8\"\n"
//== //== " Dimensions=\"%i %i %i\"\n"
//== //== " Format=\"HDF\">\n"
//== //== " rho.hdf5:/rho\n"
//== //== " </DataItem>\n"
//== //== " </Attribute>\n"
//== //== " </Grid>\n"
//== //== " </Domain>\n"
//== //== "</Xdmf>\n", fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2));
//== fprintf(fout, "<?xml version=\"1.0\" ?>\n"
//== "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\">\n"
//== "<Xdmf>\n"
//== " <Domain Name=\"name1\">\n"
//== " <Grid Name=\"fft_fine_grid\" Collection=\"Unknown\">\n"
//== " <Topology TopologyType=\"3DSMesh\" NumberOfElements=\" %i %i %i \"/>\n"
//== " <Geometry GeometryType=\"XYZ\">\n"
//== " <DataItem Dimensions=\"%i %i %i 3\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">rho.hdf5:/pos</DataItem>\n"
//== " </Geometry>\n"
//== " <Attribute\n"
//== " AttributeType=\"Vector\"\n"
//== " Center=\"Node\"\n"
//== " Name=\"mag\">\n"
//== " <DataItem\n"
//== " NumberType=\"Float\"\n"
//== " Precision=\"8\"\n"
//== " Dimensions=\"%i %i %i 3\"\n"
//== " Format=\"HDF\">\n"
//== " rho.hdf5:/mag\n"
//== " </DataItem>\n"
//== " </Attribute>\n"
//== " </Grid>\n"
//== " </Domain>\n"
//== "</Xdmf>\n", fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2), fft_->size(0), fft_->size(1), fft_->size(2));
//== fclose(fout);
}
/// Mutable reference to the total charge density.
Periodic_function<double>& rho()
{
    return *rho_;
}
/// Const reference to the total charge density.
Periodic_function<double> const& rho() const
{
    return *rho_;
}
/// Mutable reference to the pseudo-core charge density (used for non-linear core correction).
Smooth_periodic_function<double>& rho_pseudo_core()
{
    return *rho_pseudo_core_;
}
/// Const reference to the pseudo-core charge density.
Smooth_periodic_function<double> const& rho_pseudo_core() const
{
    return *rho_pseudo_core_;
}
/// Raw pointers to the three magnetization components.
/// NOTE(review): entries beyond num_mag_dims() are presumably null — confirm before dereferencing.
std::array<Periodic_function<double>*, 3> magnetization()
{
    return {magnetization_[0].get(), magnetization_[1].get(), magnetization_[2].get()};
}
/// Mutable reference to the i-th magnetization component.
Periodic_function<double>& magnetization(int i)
{
    return *(magnetization_[i]);
}
/// Const reference to the i-th magnetization component.
Periodic_function<double> const& magnetization(int i) const
{
    return *(magnetization_[i]);
}
/// Muffin-tin part of the density for a local atom index.
Spheric_function<spectral, double> const& density_mt(int ialoc) const
{
    return rho_->f_mt(ialoc);
}
/// Generate \f$ n_1 \f$ and \f$ \tilde{n}_1 \f$ in lm components.
void generate_paw_loc_density();
/// All-electron PAW atomic density for a local (split) PAW atom index.
std::vector<Spheric_function<spectral, double>> const& ae_paw_atom_density(int spl_paw_ind) const
{
    return paw_density_data_[spl_paw_ind].ae_density_;
}
/// Pseudo PAW atomic density for a local (split) PAW atom index.
std::vector<Spheric_function<spectral, double>> const& ps_paw_atom_density(int spl_paw_ind) const
{
    return paw_density_data_[spl_paw_ind].ps_density_;
}
// mdarray<double, 3> const& ae_paw_atom_magn(int spl_paw_ind) const
// {
// return paw_density_data_[spl_paw_ind].ae_magnetization_;
// }
//
// mdarray<double, 3> const& ps_paw_atom_magn(int spl_paw_ind) const
// {
// return paw_density_data_[spl_paw_ind].ps_magnetization_;
// }
/// Allocate the muffin-tin parts of density and magnetization.
void allocate()
{
    rho_->allocate_mt(true);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        magnetization_[j]->allocate_mt(true);
    }
}
/// Copy the current density (PW coefficients) and density matrix into the mixers.
/** PP-PW path only; the FP-LAPW branch is not implemented (STOP). High- and
 *  low-frequency G-vector components go to separate mixers; components are
 *  packed as i + j * ld, one stripe per channel j. */
void mixer_input()
{
    if (ctx_.full_potential()) {
        STOP();
    } else {
        int ld = static_cast<int>(hf_gvec_.size());
        /* input high-frequency components */
        for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
            for (int i = 0; i < static_cast<int>(hf_gvec_.size()); i++) {
                int igloc = hf_gvec_[i];
                hf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc));
            }
        }
        ld = static_cast<int>(lf_gvec_.size());
        /* input low-frequency components */
        for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
            if (j == 0) {
                /* only the charge channel (j == 0) carries per-G mixing weights */
                for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) {
                    int igloc = lf_gvec_[i];
                    lf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc), lf_gvec_weights_[i]);
                }
            } else {
                for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) {
                    int igloc = lf_gvec_[i];
                    lf_mixer_->input_local(i + j * ld, rho_vec_[j]->f_pw_local(igloc));
                }
            }
        }
        /* input commonly shared data */
        for (int i = 0; i < static_cast<int>(density_matrix_.size()); i++) {
            lf_mixer_->input_shared(i, density_matrix_[i], 0);
        }
    }
}
/// Inverse of mixer_input(): read the mixed density and density matrix back from the mixers.
/** Uses the same i + j * ld packing as mixer_input(). */
void mixer_output()
{
    if (ctx_.full_potential()) {
        STOP();
    } else {
        int ld = static_cast<int>(hf_gvec_.size());
        /* get high-frequency components */
        for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
            for (int i = 0; i < static_cast<int>(hf_gvec_.size()); i++) {
                int igloc = hf_gvec_[i];
                rho_vec_[j]->f_pw_local(igloc) = hf_mixer_->output_local(i + j * ld);
            }
        }
        ld = static_cast<int>(lf_gvec_.size());
        /* get low-frequency components */
        for (int j = 0; j < ctx_.num_mag_dims() + 1; j++) {
            for (int i = 0; i < static_cast<int>(lf_gvec_.size()); i++) {
                int igloc = lf_gvec_[i];
                rho_vec_[j]->f_pw_local(igloc) = lf_mixer_->output_local(i + j * ld);
            }
        }
        for (int i = 0; i < static_cast<int>(density_matrix_.size()); i++) {
            density_matrix_[i] = lf_mixer_->output_shared(i);
        }
    }
}
/// Construct the high- and low-frequency mixers and load the initial density into them.
/** The HF mixer is always linear; the LF mixer type comes from the input settings
 *  and additionally mixes the shared density-matrix elements.
 *  NOTE(review): in the full-potential branch the creation of mixer_ is commented
 *  out, yet mixer_->initialize() below would dereference it — confirm the FP
 *  path cannot reach this point. */
void mixer_init()
{
    if (!ctx_.full_potential()) {
        hf_mixer_ = Mixer_factory<double_complex>("linear",
                                                  0,
                                                  static_cast<int>(hf_gvec_.size() * (1 + ctx_.num_mag_dims())),
                                                  ctx_.mixer_input(),
                                                  ctx_.comm());
        lf_mixer_ = Mixer_factory<double_complex>(ctx_.mixer_input().type_,
                                                  static_cast<int>(density_matrix_.size()),
                                                  static_cast<int>(lf_gvec_.size() * (1 + ctx_.num_mag_dims())),
                                                  ctx_.mixer_input(),
                                                  ctx_.comm());
    } else {
        //mixer_ = Mixer_factory<double>(ctx_.mixer_input().type_, size(), ctx_.mixer_input(), ctx_.comm());
    }
    mixer_input();
    if (ctx_.full_potential()) {
        mixer_->initialize();
    } else {
        lf_mixer_->initialize();
        if (hf_mixer_) {
            hf_mixer_->initialize();
        }
    }
}
/// Mix new and old density, return the RMS of the change.
double mix()
{
    double rms;
    if (ctx_.full_potential()) {
        /* NOTE(review): STOP() aborts here, so the FP-LAPW mixing below is
           currently unreachable placeholder code. */
        STOP();
        /* mix in real-space in case of FP-LAPW */
        mixer_input();
        rms = mixer_->mix(ctx_.settings().mixer_rss_min_);
        mixer_output();
        /* get rho(G) after mixing */
        rho_->fft_transform(-1);
    } else {
        /* mix in G-space in case of PP */
        mixer_input();
        rms = lf_mixer_->mix(ctx_.settings().mixer_rss_min_);
        if (hf_mixer_) {
            rms += hf_mixer_->mix(ctx_.settings().mixer_rss_min_);
        }
        mixer_output();
    }
    return rms;
}
/// Residual sum of squares of the low-frequency mixer.
inline double dr2()
{
    return lf_mixer_->rss();
}
/// Const access to the atomic density matrix.
mdarray<double_complex, 4> const& density_matrix() const
{
    return density_matrix_;
}
/// Mutable access to the atomic density matrix.
mdarray<double_complex, 4>& density_matrix()
{
    return density_matrix_;
}
/// Apply Periodic_function::fft_transform to density and all magnetization components.
inline void fft_transform(int direction__)
{
    rho_->fft_transform(direction__);
    for (int j = 0; j < ctx_.num_mag_dims(); j++) {
        magnetization_[j]->fft_transform(direction__);
    }
}
/// Return density matrix in auxiliary form.
/** Repack the complex density matrix of all atoms of type iat__ into a real
 *  array: packed lower-triangular (xi1 <= xi2) basis-function pairs in the
 *  first dimension, atom index within the type in the second, and the
 *  density / magnetization components in the third. */
inline mdarray<double, 3> density_matrix_aux(int iat__)
{
    auto& atom_type = unit_cell_.atom_type(iat__);
    int nbf = atom_type.mt_basis_size();

    /* convert to real matrix */
    mdarray<double, 3> dm(nbf * (nbf + 1) / 2, atom_type.num_atoms(), ctx_.num_mag_dims() + 1);
    #pragma omp parallel for
    for (int i = 0; i < atom_type.num_atoms(); i++) {
        int ia = atom_type.atom_id(i);
        for (int xi2 = 0; xi2 < nbf; xi2++) {
            for (int xi1 = 0; xi1 <= xi2; xi1++) {
                /* packed index of the (xi1, xi2) pair */
                int idx12 = xi2 * (xi2 + 1) / 2 + xi1;
                switch (ctx_.num_mag_dims()) {
                    case 3: {
                        dm(idx12, i, 2) = 2 * std::real(density_matrix_(xi2, xi1, 2, ia));
                        dm(idx12, i, 3) = -2 * std::imag(density_matrix_(xi2, xi1, 2, ia));
                    }
                    /* fall through: the non-collinear case also needs the
                       collinear components below */
                    case 1: {
                        dm(idx12, i, 0) = std::real(density_matrix_(xi2, xi1, 0, ia) + density_matrix_(xi2, xi1, 1, ia));
                        dm(idx12, i, 1) = std::real(density_matrix_(xi2, xi1, 0, ia) - density_matrix_(xi2, xi1, 1, ia));
                        break;
                    }
                    case 0: {
                        dm(idx12, i, 0) = density_matrix_(xi2, xi1, 0, ia).real();
                        break;
                    }
                }
            }
        }
    }
    /* plain return enables NRVO; `return std::move(dm)` would inhibit copy
       elision (clang's -Wpessimizing-move) */
    return dm;
}
/// Calculate magnetic moment of the atoms
/// Compute approximate atomic magnetic moments in case of PW-PP.
mdarray<double, 2> compute_atomic_mag_mom() const
{
    PROFILE("sirius::DFT_ground_state::compute_atomic_mag_mom");

    mdarray<double, 2> mmom(3, unit_cell_.num_atoms());
    mmom.zero();

    #pragma omp parallel for
    for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
        /* sum magnetization over the real-space grid points mapped to this atom */
        auto& atom_to_grid_map = ctx_.atoms_to_grid_idx_map()[ia];
        for (auto coord : atom_to_grid_map)
        {
            int ir = coord.first;
            for (int j = 0; j < ctx_.num_mag_dims(); j++) {
                mmom(j, ia) += magnetization(j).f_rg(ir);
            }
        }
        /* scale by the grid volume element; components beyond num_mag_dims()
           were zero-initialized, so scaling all three is safe */
        for (int j: {0, 1, 2}) {
            mmom(j, ia) *= (unit_cell_.omega() / ctx_.fft().size());
        }
    }
    /* grid points are distributed over the FFT communicator */
    ctx_.fft().comm().allreduce(&mmom(0, 0), static_cast<int>(mmom.size()));
    /* plain return enables NRVO; `return std::move(mmom)` would inhibit copy
       elision (clang's -Wpessimizing-move) */
    return mmom;
}
/// Symmetrize density matrix.
/** Initially, density matrix is obtained with summation over irreducible BZ:
* \f[
* \tilde n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} =
* \sum_{j} \sum_{{\bf k}}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}}
* \langle \Psi_{j{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle
* \f]
* In order to symmetrize it, the following operation is performed:
* \f[
* n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}}
* \sum_{j} \sum_{\bf k}^{IBZ} \langle Y_{\ell m} u_{\ell \lambda}^{\alpha}| \Psi_{j{\bf P}{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}}
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma'} | u_{\ell' \lambda'}^{\alpha} Y_{\ell' m'} \rangle
* \f]
* where \f$ {\bf P} \f$ is the space-group symmetry operation. The inner product between wave-function and
* local orbital is transformed as:
* \f[
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle =
* \int \Psi_{j{\bf P}{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr =
* \int \Psi_{j{\bf k}}^{\sigma *}({\bf P}^{-1}{\bf r}) u_{\ell \lambda}^{\alpha}(r) Y_{\ell m}(\hat {\bf r}) dr =
* \int \Psi_{j{\bf k}}^{\sigma *}({\bf r}) u_{\ell \lambda}^{{\bf P}\alpha}(r) Y_{\ell m}({\bf P} \hat{\bf r}) dr
* \f]
* Under rotation the spherical harmonic is transformed as:
* \f[
* Y_{\ell m}({\bf P} \hat{\bf r}) = {\bf P}^{-1}Y_{\ell m}(\hat {\bf r}) = \sum_{m'} D_{m'm}^{\ell}({\bf P}^{-1}) Y_{\ell m'}(\hat {\bf r}) =
* \sum_{m'} D_{mm'}^{\ell}({\bf P}) Y_{\ell m'}(\hat {\bf r})
* \f]
* The inner-product integral is then rewritten as:
* \f[
* \langle \Psi_{j{\bf P}{\bf k}}^{\sigma} | u_{\ell \lambda}^{\alpha} Y_{\ell m} \rangle =
* \sum_{m'} D_{mm'}^{\ell}({\bf P}) \langle \Psi_{j{\bf k}}^{\sigma} | u_{\ell \lambda}^{{\bf P}\alpha} Y_{\ell m} \rangle
* \f]
* and the final expression for density matrix gets the following form:
* \f[
* n_{\ell \lambda m \sigma, \ell' \lambda' m' \sigma'}^{\alpha} = \sum_{{\bf P}}
* \sum_{j} \sum_{\bf k}^{IBZ} \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P})
* \langle Y_{\ell m_1} u_{\ell \lambda}^{{\bf P} \alpha}|
* \Psi_{j{\bf k}}^{\sigma} \rangle w_{\bf k} n_{j{\bf k}} \langle \Psi_{j{\bf k}}^{\sigma'} |
* u_{\ell' \lambda'}^{{\bf P}\alpha} Y_{\ell' m_2} \rangle = \sum_{{\bf P}}
* \sum_{m_1 m_2} D_{mm_1}^{\ell *}({\bf P}) D_{m'm_2}^{\ell'}({\bf P})
* \tilde n_{\ell \lambda m_1 \sigma, \ell' \lambda' m_2 \sigma'}^{{\bf P}\alpha}
* \f]
*/
void symmetrize_density_matrix();
};
#include "Density/initial_density.hpp"
#include "Density/add_k_point_contribution_rg.hpp"
#include "Density/add_k_point_contribution_dm.hpp"
#include "Density/generate_valence.hpp"
#include "Density/generate_rho_aug.hpp"
#include "Density/symmetrize_density_matrix.hpp"
#include "Density/generate_valence_mt.hpp"
#include "Density/check_density_continuity_at_mt.hpp"
#include "Density/paw_density.hpp"
}
#endif // __DENSITY_H__
|
atax.c | /**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* atax.c: this file is part of PolyBench/C */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "atax.h"
/* Array initialization: x[i] = 1 + i/n, A[i][j] = ((i+j) mod n) / (5*m). */
static
void init_array (int m, int n,
                 DATA_TYPE POLYBENCH_2D(A, M, N, m, n),
                 DATA_TYPE POLYBENCH_1D(x, N, n))
{
  DATA_TYPE fn = (DATA_TYPE) n;

  for (int col = 0; col < n; col++) {
    x[col] = 1 + (col / fn);
  }
  for (int row = 0; row < m; row++) {
    for (int col = 0; col < n; col++) {
      A[row][col] = (DATA_TYPE) ((row + col) % n) / (5 * m);
    }
  }
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int n,
                 DATA_TYPE POLYBENCH_1D(y, N, n))
{
  POLYBENCH_DUMP_START;
  POLYBENCH_DUMP_BEGIN("y");
  for (int k = 0; k < n; k++) {
    /* newline every 20 values */
    if (k % 20 == 0) {
      fprintf (POLYBENCH_DUMP_TARGET, "\n");
    }
    fprintf (POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, y[k]);
  }
  POLYBENCH_DUMP_END("y");
  POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Computes y = A^T * (A * x): tmp = A*x row by row, then y += A[i][:] * tmp[i]. */
static
void kernel_atax(int m, int n,
                 DATA_TYPE POLYBENCH_2D(A, M, N, m, n),
                 DATA_TYPE POLYBENCH_1D(x, N, n),
                 DATA_TYPE POLYBENCH_1D(y, N, n),
                 DATA_TYPE POLYBENCH_1D(tmp, M, m))
{
  int i, j;

#pragma omp parallel for default(shared) private(i) firstprivate(n)
  for (i = 0; i < _PB_N; i++)
    y[i] = 0;

  /* BUG FIX: the reduction array section was hard-coded as y[:2100], which is
     only correct for one dataset size; size it by the runtime extent n.
     A and x are read-only in this loop, so sharing them (instead of the
     original firstprivate) is equivalent and avoids redundant copies. */
#pragma omp parallel for default(shared) private(i, j) firstprivate(m, n) reduction(+ : y[:n])
  for (i = 0; i < _PB_M; i++)
    {
      tmp[i] = SCALAR_VAL(0.0);
      for (j = 0; j < _PB_N; j++)
        tmp[i] = tmp[i] + A[i][j] * x[j];
      for (j = 0; j < _PB_N; j++)
        y[j] = y[j] + A[i][j] * tmp[i];
    }
}
/* Driver: allocate inputs, run and time the ATAX kernel y = A^T (A x),
   and dump y to prevent dead-code elimination. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int m = M;
  int n = N;
  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, M, N, m, n);
  POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, N, n);
  POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, N, n);
  POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, M, m);
  /* Initialize array(s). */
  init_array (m, n, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
  /* Start timer. */
  polybench_start_instruments;
  /* Run kernel. */
  kernel_atax (m, n,
               POLYBENCH_ARRAY(A),
               POLYBENCH_ARRAY(x),
               POLYBENCH_ARRAY(y),
               POLYBENCH_ARRAY(tmp));
  /* Stop and print timer. */
  polybench_stop_instruments;
  polybench_print_instruments;
  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(y)));
  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(x);
  POLYBENCH_FREE_ARRAY(y);
  POLYBENCH_FREE_ARRAY(tmp);
  return 0;
}
|
MM1fu.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "matrixUtils/matrixUtils.h"
#include "benchmarkUtils/timeUtils.h"
// Reserva de memoria
#define SIZE_DATA (1024*1024*64*3)
static double MEM_CHUNK[SIZE_DATA];
// Version 6. version del algoritmo 2 filas por 2 filas.
int main(int argc, char **argv){
int N = (int) atoi(argv[1]); // matrix size NxN
int NUM_T = (int) atoi(argv[2]); //number of threads
//#pragma omp parallel
int i, j, k;
double *matrixA, *matrixB, *matrixC;
matrixA = MEM_CHUNK;
matrixB = matrixA + (N * N);
matrixC = matrixB + (N * N);
// The main process make the init routines
//#pragma omp master
matrixInitN(N, matrixA, matrixB, matrixC);
// printf("Matrix A: \n");
// matrixPrint(N, N, matrixA);
// printf("Matrix B: \n");
// matrixPrint(N, N, matrixB);
omp_set_num_threads(NUM_T);
sampleStart();
// Test matrix multiplication with OpenMP
#pragma omp parallel for
for(i=0; i<N; i++){
for(j=0; j<N; j++){
double *ptra, *ptrb;
double c0, c1, c2, c3;
c0 = c1 = c2 =c3 = 0.0;
ptra = matrixA + (i*N);
ptrb = matrixB + (j*N);
k = N;
while(k&3){
c0 += (*ptra * *ptrb);
k--;
ptra++, ptrb++;
}
for(; k>0; k-=4, ptra+=4, ptrb+=4){
c0 += (*ptra * *ptrb);
c1 += *(ptra+1) * *(ptrb+1);
c2 += *(ptra+2) * *(ptrb+2);
c3 += *(ptra+3) * *(ptrb+3);
}
matrixC[j+i*N] = c0 + c1 + c2 + c3;
}
}
sampleStop();
// printf("Matrix C: \n");
// matrixPrint(N, N, matrixC);
printTime();
return 0;
}
|
main.c | #include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>
#include <time.h>
#include <omp.h>
#define TO_NS( ts ) ((ts.tv_sec * 1000000000) + ts.tv_nsec)
static bool verbose = false;
static size_t nthreads = 8;
static size_t vecsize = 1048576;
static size_t niters = 100;
/* Fill vec[0..size) with pseudo-random values in [0, 255].
 * NOTE(review): rand() is not required to be thread-safe; calling it from an
 * OpenMP parallel loop races on its hidden state (consider rand_r or a
 * per-thread PRNG). Left unchanged to preserve existing behavior. */
void vector_init(int *vec, size_t size) {
    size_t idx;
#pragma omp parallel for
    for (idx = 0; idx < size; idx++) {
        vec[idx] = rand() % 256;
    }
}
/* Sum the current contents of vec[0..size) and refill each slot with a fresh
 * pseudo-random value in [0, 255] for the next iteration.
 * NOTE(review): rand() inside the parallel loop is a race on its internal
 * state; kept as-is to preserve existing behavior. */
int vector_reduce(int *vec, size_t size) {
    int total = 0;
    size_t idx;
#pragma omp parallel for reduction(+:total)
    for (idx = 0; idx < size; idx++) {
        total += vec[idx];
        vec[idx] = rand() % 256;
    }
    return total;
}
/* Benchmark driver: repeatedly reduce (and re-randomize) a vector, timing
 * each iteration with CLOCK_MONOTONIC. Returns the last reduction result so
 * the computation cannot be dead-code eliminated. */
int main(int argc, char **argv) {
    size_t i;
    /* BUG FIX: ret initialized — it was read uninitialized when niters == 0 */
    int c, ret = 0, *vec;
    struct timespec start, iter_start, end;
    while((c = getopt(argc, argv, "t:s:i:vh")) != -1) {
        switch(c) {
            case 't': nthreads = strtoul(optarg, NULL, 10); break;
            case 's': vecsize = strtoul(optarg, NULL, 10); break;
            case 'i': niters = strtoul(optarg, NULL, 10); break;
            case 'v': verbose = true; break;
            case 'h':
            default:
                printf("Usage: vector_reduce -t THREADS -s VECSIZE -i ITERS\n");
                exit(0);
        }
    }
    omp_set_num_threads((int)nthreads);
#ifdef _ALIGN_LAYOUT
    /* BUG FIX: posix_memalign takes void **, and it returns an error code
       without setting errno, so perror() was the wrong reporting call */
    c = posix_memalign((void **)&vec, 4096, sizeof(int) * vecsize);
    if(c) {
        fprintf(stderr, "Could not allocate aligned vector\n");
        exit(1);
    }
#else
    vec = (int *)malloc(sizeof(int) * vecsize);
    if(!vec) {
        fprintf(stderr, "Could not allocate vector\n");
        exit(1);
    }
#endif
    vector_init(vec, vecsize);
    clock_gettime(CLOCK_MONOTONIC, &start);
    for(i = 0; i < niters; i++) {
        if(verbose) clock_gettime(CLOCK_MONOTONIC, &iter_start);
        ret = vector_reduce(vec, vecsize);
        if(verbose) {
            clock_gettime(CLOCK_MONOTONIC, &end);
            /* BUG FIX: %zu for size_t, %ld for the signed ns difference */
            printf("Iteration %zu: %ld ns\n", i, TO_NS(end) - TO_NS(iter_start));
        }
    }
    clock_gettime(CLOCK_MONOTONIC, &end);
    printf("Computation took %ld ns\n", TO_NS(end) - TO_NS(start));
    free(vec);
    return ret;
}
|
GB_binop__le_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__le_int32
// A.*B function (eWiseMult): GB_AemultB__le_int32
// A*D function (colscale): GB_AxD__le_int32
// D*A function (rowscale): GB_DxB__le_int32
// C+=B function (dense accum): GB_Cdense_accumB__le_int32
// C+=b function (dense accum): GB_Cdense_accumb__le_int32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__le_int32
// C=scalar+B GB_bind1st__le_int32
// C=scalar+B' GB_bind1st_tran__le_int32
// C=A+scalar GB_bind2nd__le_int32
// C=A'+scalar GB_bind2nd_tran__le_int32
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT32 || GxB_NO_LE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Dense eWise C = A+B with no accumulator; the loop body comes from the shared
// template, specialized via the GB_* macros defined above (cij = aij <= bij).
GrB_Info GB_Cdense_ewise3_noaccum__le_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination is disabled via GB_control.h
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// The accumulator body is compiled out (#if 0) for this operator, so this
// function is a no-op that reports success.
GrB_Info GB_Cdense_accumB__le_int32
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
    #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// The accumulator body is compiled out (#if 0) for this operator, so this
// function is a no-op that reports success.
GrB_Info GB_Cdense_accumb__le_int32
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
    // get the scalar b for C += b, of type int32_t
    int32_t bwork = (*((int32_t *) p_bwork)) ;
    #include "GB_dense_subassign_22_template.c"
    return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Column scaling: each entry of C is the boolean result of (aij <= d_jj);
// the traversal lives in the shared colscale template.
GrB_Info GB_AxD__le_int32
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds the boolean result of the comparison
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Row scaling: each entry of C is the boolean result of (d_ii <= bij);
// the traversal lives in the shared rowscale template.
GrB_Info GB_DxB__le_int32
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Release all ek_slice workspaces; also invoked by the templates on early exit.
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd over the union of patterns of A and B, optionally masked by M;
// the work is partitioned by the precomputed TaskList.
GrB_Info GB_AaddB__le_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces allocated (if needed) inside the template
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult over the intersection of patterns of A and B, optionally masked.
GrB_Info GB_AemultB__le_int32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x <= Bx [p]) for every entry present in the bitmap Bb.
GrB_Info GB_bind1st__le_int32
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int32_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] <= y) for every entry present in the bitmap Ab.
GrB_Info GB_bind2nd__le_int32
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int32_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB_bind1st_tran__le_int32
(
    GrB_Matrix C,
    const GB_void *x_input,             // the bound first (scalar) operand
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,   // one workspace per thread/bucket
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
    // generator boilerplate: re-establish GB_ATYPE for code emitted after this
    // function (the redefinition is identical; harmless but intentional)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by the included GB_unop_transpose.c template.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB_bind2nd_tran__le_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // the bound second (scalar) operand
    int64_t *GB_RESTRICT *Workspaces,   // one workspace per thread/bucket
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across threads
    int nworkspaces,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
trmm_x_dia_n_hi_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
// y := alpha * A * x + beta * y, column by column, where A is stored in DIA
// format and only its upper-triangular diagonals (distance >= 0) are used.
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT num_threads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT cc = 0; cc < columns; ++cc)
    {
        ALPHA_Number *Y = &y[index2(cc, 0, ldy)];
        // scale the destination column first: y <- beta * y
        for (ALPHA_INT r = 0; r < mat->rows; ++r)
            alpha_mul(Y[r], Y[r], beta);
        const ALPHA_Number *X = &x[index2(cc, 0, ldx)];
        // accumulate alpha * A * x one stored diagonal at a time
        for (ALPHA_INT di = 0; di < mat->ndiag; ++di)
        {
            const ALPHA_INT d = mat->distance[di];
            if (d < 0)
                continue; // strictly-lower diagonal: not part of the upper triangle
            const ALPHA_INT row0 = alpha_max(0, -d);
            const ALPHA_INT col0 = alpha_max(0, d);
            const ALPHA_INT len = alpha_min(mat->rows - row0, mat->cols - col0);
            for (ALPHA_INT i = 0; i < len; ++i)
            {
                ALPHA_Number t;
                alpha_mul(t, mat->values[index2(di, row0 + i, mat->lval)], alpha);
                alpha_madde(Y[row0 + i], t, X[col0 + i]);
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
main.c | // this creates an input device transform (idt) lookup table for a given camera
// based on spectral sensitivity functions (ssf). works best with a high-res
// high-quality spectral upsampling table, create via
// `mkspectra 1024 /dev/null XYZ`. (note the lack of `-b`)
// #define USE_LEVMAR
#include "core/inpaint.h"
#include "core/half.h"
#include "core/clip.h"
#include "core/solve.h"
#include "q2t.h"
#include <strings.h>
#include "../../pipe/modules/i-mlv/adobe_coeff.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <alloca.h>
#ifdef USE_LEVMAR
#include "levmar-2.6/levmar.h"
#endif
#define MIN(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a < _b ? _a : _b; })
#define MAX(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
#define CLAMP(a,m,M) (MIN(MAX((a), (m)), (M)))
#define CIE_SAMPLES 95
#define CIE_LAMBDA_MIN 360.0
#define CIE_LAMBDA_MAX 830.0
#define CIE_FINE_SAMPLES 95
typedef struct header_t
{ // header of .lut files
uint32_t magic;
uint16_t version;
uint8_t channels;
uint8_t datatype;
uint32_t wd;
uint32_t ht;
}
header_t;
static int num_coeff = 6;
static uint32_t seed = 1337;
static const double srgb_to_xyz[] = {
0.412453, 0.357580, 0.180423,
0.212671, 0.715160, 0.072169,
0.019334, 0.119193, 0.950227
};
static const double xyz_to_rec2020[] = {
1.7166511880, -0.3556707838, -0.2533662814,
-0.6666843518, 1.6164812366, 0.0157685458,
0.0176398574, -0.0427706133, 0.9421031212
};
static const double rec2020_to_xyz[] = {
0.6369580483, 0.1446169036, 0.1688809752,
0.2627002120, 0.6779980715, 0.0593017165,
0.0000000000, 0.0280726930, 1.0609850577,
};
static inline double
xrand()
{ // Algorithm "xor" from p. 4 of Marsaglia, "Xorshift RNGs";
  // advances the global seed and returns a uniform deviate in [0,1)
  uint32_t s = seed;
  s ^= s << 13;
  s ^= s >> 17;
  s ^= s << 5;
  seed = s;
  return s / 4294967296.0;
}
// bilinear lookup of spectral coefficients for a given chromaticity
static inline void
fetch_coeff(
    const double *xy,      // cie xy chromaticities
    const float *spectra,  // loaded spectral coeffs, 4-strided
    const int wd,          // width of texture
    const int ht,          // height of texture
    double *out)           // bilinear lookup will end up here
{
  out[0] = out[1] = out[2] = 0.0;
  if(xy[0] < 0 || xy[1] < 0 || xy[0] > 1.0 || xy[1] > 1.0) return;
  double tc[2] = {xy[0], xy[1]};
  tri2quad(tc, tc+1); // triangle -> unit-square parametrisation of the texture
  const double xf = tc[0]*wd, yf = tc[1]*ht;
  const int x0 = (int)CLAMP(xf,   0, wd-1);
  const int y0 = (int)CLAMP(yf,   0, ht-1);
  const int x1 = (int)CLAMP(x0+1, 0, wd-1);
  const int y1 = (int)CLAMP(y0+1, 0, ht-1);
  const int dx = x1 - x0, dy = y1 - y0; // 0 at the right/bottom border
  const double u = xf - x0, v = yf - y0;
  const float *c00 = spectra + 4*(y0*wd + x0);
  const float *c10 = c00 + 4*dx;
  const float *c01 = c00 + 4*wd*dy;
  const float *c11 = c00 + 4*(wd*dy + dx);
  const double w00 = (1.0-u)*(1.0-v), w10 = u*(1.0-v);
  const double w01 = (1.0-u)*v,       w11 = u*v;
  for(int k=0;k<3;k++)
    out[k] = w00*c00[k] + w10*c10[k] + w01*c01[k] + w11*c11[k];
}
// nearest neighbour lookup of spectral coefficients for a given chromaticity
static inline void
fetch_coeffi(
    const double *xy,      // cie xy chromaticities
    const float *spectra,  // loaded spectral coeffs, 4-strided
    const int wd,          // width of texture
    const int ht,          // height of texture
    double *out)           // nearest texel's first three channels
{
  out[0] = out[1] = out[2] = 0.0;
  if(xy[0] < 0 || xy[1] < 0 || xy[0] > 1.0 || xy[1] > 1.0) return;
  double tc[2] = {xy[0], xy[1]};
  tri2quad(tc, tc+1); // triangle -> unit-square parametrisation of the texture
  const int xi = (int)CLAMP(tc[0]*wd+0.5, 0, wd-1);
  const int yi = (int)CLAMP(tc[1]*ht+0.5, 0, ht-1);
  const float *texel = spectra + 4*(yi*wd + xi);
  for(int k=0;k<3;k++) out[k] = texel[k];
}
// sample a cubic b-spline around (0.5, 0.5, 0.5): each channel is mid-grey
// plus three uniform perturbations of amplitude delta
static inline void
sample_rgb(double *rgb, double delta, int clamp)
{
  for(int c=0;c<3;c++)
  {
    double v = 0.5;
    for(int j=0;j<3;j++)
      v += delta*(2.0*xrand()-1.0);
    rgb[c] = clamp ? CLAMP(v, 0.0, 1.0) : v;
  }
}
static inline double
normalise1(double *col)
{ // scale a 3-vector to unit component sum (chromaticity);
  // returns the original sum ("brightness")
  const double brightness = col[0] + col[1] + col[2];
  col[0] /= brightness;
  col[1] /= brightness;
  col[2] /= brightness;
  return brightness;
}
static inline double
poly(const double *c, double lambda, int num)
{ // Horner evaluation; c[0] holds the highest-order coefficient
  double acc = 0.0;
  for(const double *ci = c; ci < c + num; ci++)
    acc = acc * lambda + *ci;
  return acc;
}
static inline double
sigmoid(double x)
{
return 0.5 * x / sqrt(1.0 + x * x) + 0.5;
}
static inline double
ddx_sigmoid(double x)
{
return 0.5 * pow(x*x+1.0, -3.0/2.0);
}
// integrate a candidate (sigmoid/poly) spectrum against one measured cfa
// response curve; returns the camera response for the given channel.
static inline double
eval_ref(
    const double (*cfa_spec)[4],  // rows of (lambda, r, g, b) samples
    const int channel,            // 0,1,2 selects the r/g/b column
    const int cnt,                // number of sample rows
    const double *cf)             // 3 poly coeffs of the test spectrum
{
  double sum = 0.0;
  for(int s=0;s<cnt;s++)
    sum += sigmoid(poly(cf, cfa_spec[s][0], 3)) * cfa_spec[s][1+channel];
  // scale by the covered wavelength range over the sample count
  return sum * (cfa_spec[cnt-1][0] - cfa_spec[0][0]) / (double) cnt;
}
// inner product of two sigmoid/polynomial spectra over the visible range,
// sampled at CIE_FINE_SAMPLES points.
static inline double
eval(
    const double *cp, // spectrum with np coeffs, use normalised lambda (for optimiser)
    const double *cf, // spectrum with nf coeffs, use lambda in nanometers
    int np,
    int nf)
{
  double acc = 0.0;
  for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
  {
    const double l0 = (i+.5)/(double)CIE_FINE_SAMPLES; // normalised [0,1)
    const double l1 = l0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN; // nm
    // cf uses real wavelengths, cp the normalised coordinate (keeps the
    // optimiser's coefficients small)
    acc += sigmoid(poly(cf, l1, nf)) * sigmoid(poly(cp, l0, np));
  }
  return acc * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
}
// same integral as eval(), but additionally accumulates the gradient of the
// result with respect to the np polynomial coefficients in cp.
static inline double
ddp_eval(
    const double *cp, // parameters, cfa spectrum
    const double *cf, // coefficients of data point
    int np, // number of parameter coeffs (must be <= 20, see buffers below)
    int nf, // number of data point coeffs
    double *jac) // output: gradient dx/dp{0..np-1} (i.e. np elements)
{
  double out = 0.0;
  double ddp_poly[20]; // ddp_poly[j] = lambda0^j
  double ddp_t[20];    // per-coefficient derivative of the sigmoid factor t
  for(int j=0;j<np;j++) jac[j] = 0.0;
  for (int i = 0; i < CIE_FINE_SAMPLES; ++i)
  {
    double lambda0 = (i+.5)/(double)CIE_FINE_SAMPLES; // normalised in (0,1)
    double lambda1 = lambda0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN; // nanometres
    double x = poly(cf, lambda1, nf); // from map, use real lambda
    double y = poly(cp, lambda0, np); // optimising this, use normalised for smaller values (assumption of hessian approx)
    double s = sigmoid(x);
    double t = sigmoid(y);
    double ddx_sig = ddx_sigmoid(y);
    // ddp t = ddx_sigmoid(poly(cp, lambda0, np)) * { ddp_poly(cp, lambda0, np) }
    // where ddp_poly = (lambda0^{num_coeff-1}, lambda0^{num_coeff-2}, .., lambda0, 1)
    ddp_poly[0] = 1.0;
    ddp_poly[1] = lambda0;
    ddp_poly[2] = lambda0 * lambda0;
    // addition-chain powers: lambda0^j = lambda0^(j/2) * lambda0^(j - j/2)
    // (odd j: j = j/2 + (j/2 + 1); even j: j = j/2 + j/2)
    for(int j=3;j<np;j++) ddp_poly[j] = (j&1? ddp_poly[j/2] * ddp_poly[j/2+1] : ddp_poly[j/2]*ddp_poly[j/2]);
    // poly() is Horner with c[0] as the highest-order coefficient, hence the
    // reversed index: d poly / d cp[j] = lambda0^(np-1-j)
    for(int j=0;j<np;j++) ddp_t[j] = ddx_sig * ddp_poly[np-j-1];
    // now out = sum(s * t)
    // ddp out = sum ddp s*t = sum s * ddp t
    out += s * t;
    for(int j=0;j<np;j++) jac[j] += s * ddp_t[j];
  }
  // apply final scale:
  for(int j=0;j<np;j++) jac[j] *= (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
  // also return regular value, we need it for the chain rule
  return out * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) / (double)CIE_FINE_SAMPLES;
}
// 1-indexed accessor for a row-major 3x3 matrix, shared by mat3_det/mat3_inv.
// fix: this macro used to be #defined inside mat3_det and only #undef'd at the
// very end of mat3_inv, silently coupling the two functions (reordering or
// removing mat3_det would have broken mat3_inv).  scope it explicitly here.
#define A(y, x) a[(y - 1) * 3 + (x - 1)]
static inline double
mat3_det(const double *const restrict a)
{ // determinant by cofactor expansion along the first column
  return
    A(1, 1) * (A(3, 3) * A(2, 2) - A(3, 2) * A(2, 3)) -
    A(2, 1) * (A(3, 3) * A(1, 2) - A(3, 2) * A(1, 3)) +
    A(3, 1) * (A(2, 3) * A(1, 2) - A(2, 2) * A(1, 3));
}
static inline double
mat3_inv(const double *const restrict a, double *const restrict inv)
{ // invert a 3x3 matrix via the adjugate; returns the determinant,
  // or 0.0 if the matrix is singular (inv is then left untouched)
  const double det = mat3_det(a);
  if(!(det != 0.0)) return 0.0; // negated compare also rejects det == NaN
  const double invdet = 1.0 / det;
  inv[3*0+0] =  invdet * (A(3, 3) * A(2, 2) - A(3, 2) * A(2, 3));
  inv[3*0+1] = -invdet * (A(3, 3) * A(1, 2) - A(3, 2) * A(1, 3));
  inv[3*0+2] =  invdet * (A(2, 3) * A(1, 2) - A(2, 2) * A(1, 3));
  inv[3*1+0] = -invdet * (A(3, 3) * A(2, 1) - A(3, 1) * A(2, 3));
  inv[3*1+1] =  invdet * (A(3, 3) * A(1, 1) - A(3, 1) * A(1, 3));
  inv[3*1+2] = -invdet * (A(2, 3) * A(1, 1) - A(2, 1) * A(1, 3));
  inv[3*2+0] =  invdet * (A(3, 2) * A(2, 1) - A(3, 1) * A(2, 2));
  inv[3*2+1] = -invdet * (A(3, 2) * A(1, 1) - A(3, 1) * A(1, 2));
  inv[3*2+2] =  invdet * (A(2, 2) * A(1, 1) - A(2, 1) * A(1, 2));
  return det;
}
#undef A
static inline void
mat3_mulv(
    const double *const restrict a,   // 3x3 row-major matrix
    const double *const restrict v,   // input 3-vector
    double *const restrict res)       // output 3-vector, res = a * v
{
  for(int r=0;r<3;r++)
    res[r] = a[3*r+0]*v[0] + a[3*r+1]*v[1] + a[3*r+2]*v[2];
}
// jacobian for levmar:
// analytic jacobian of lm_callback: derivatives of the 2 normalised camera
// rgb residuals per data point w.r.t. all m = 3*num_coeff cfa coefficients.
void lm_jacobian(
    double *p, // parameters: num_coeff * 3 for camera cfa spectra
    double *jac, // output: derivative dx / dp (n x m entries, n-major, i.e. (dx[0]/dp[0], dx[0]/dp[1], ..)
    int m, // number of parameters (=num_coeff*3)
    int n, // number of data points
    void *data)
{
  double *cf = data; // packed spectral coeffs, 3 per data point
  memset(jac, 0, sizeof(jac[0])*m*n);
  double *Je = alloca(3*m*sizeof(double)); // dxdp
  int num = n/2; // two residuals (r, g) per data point, see lm_callback
  for(int i=0;i<num;i++) // for all rgb data points
  {
    memset(Je, 0, sizeof(double)*3*m);
    // get derivative dx[3*i+{0,1,2}] / dp[3 * num_coeff]
    // each channel k only depends on its own num_coeff parameters, so its
    // gradient is written into the matching slice of row k of Je:
    double rgb[3];
    rgb[0] = ddp_eval(p+0*num_coeff, cf + 3*i, num_coeff, 3, Je + 0*m);
    rgb[1] = ddp_eval(p+1*num_coeff, cf + 3*i, num_coeff, 3, Je + 1*m+1*num_coeff);
    rgb[2] = ddp_eval(p+2*num_coeff, cf + 3*i, num_coeff, 3, Je + 2*m+2*num_coeff);
    double b = rgb[0]+rgb[1]+rgb[2];
    double ib = 1.0/(b*b);
    // account for normalise1:
    // we got n = rgb/sum(rgb), so
    // ddr r/(r+g+b) => ddx x/(x+c) = c / (x+c)^2
    // ddr g/(r+g+b) => ddx a/(x+b) = - a / (x+b)^2
    //      / ddr r, ddg r, ddb r \
    // Jn = | ddr g, ddg g, ddb g |
    //      \ ddr b, ddg b, ddb b /
    double Jn[] = {
      (rgb[1]+rgb[2])*ib, -rgb[0]*ib, -rgb[0]*ib,
      -rgb[1]*ib, (rgb[0]+rgb[2])*ib, -rgb[1]*ib,
      -rgb[2]*ib, -rgb[2]*ib, (rgb[0]+rgb[1])*ib,
    };
    // ddp n(rgb) = ddx n(rgb) * ddp rgb
    //              Jn           `-----' =Je see above
    //              3x3          (3*num_coeff)x3
    // only k < 2 because the residual vector stores just (r, g) per point:
    for(int j=0;j<m;j++)   // parameter number
      for(int k=0;k<2;k++) // rgb colour channel
        for(int l=0;l<3;l++)
          jac[(2*i + k)*m + j] += Jn[3*k+l] * Je[m*l + j];
  }
}
// callback for levmar: evaluate the model at the current parameters and
// write two residual components (normalised camera r and g) per data point.
void lm_callback(
    double *p, // parameters: num_coeff * 3 for camera cfa spectra
    double *x, // output data, write here
    int m, // number of parameters
    int n, // number of data points, i.e. colour spots
    void *data)
{
  const double *coeffs = data; // packed spectral coeffs, 3 per data point
  const int points = n/2;
  for(int i=0;i<points;i++)
  {
    const double *cf = coeffs + 3*i;
    double rgb[3];
    for(int k=0;k<3;k++)
      rgb[k] = eval(p + k*num_coeff, cf, num_coeff, 3);
    normalise1(rgb); // fit chromaticity only, brightness is projected out
    x[2*i+0] = rgb[0]; // r and g suffice: b = 1 - r - g after normalisation
    x[2*i+1] = rgb[1];
  }
}
// numerical jacobian via central differences, one parameter column at a time.
void lm_jacobian_dif(
    double *p, // parameters: num_coeff * 3 for camera cfa spectra
    double *jac, // output: derivative dx / dp (n x m entries, n-major, i.e. (dx[0]/dp[0], dx[0]/dp[1], ..)
    int m, // number of parameters (=num_coeff*3)
    int n, // number of data points
    void *data)
{
  const double h = 1e-10; // finite-difference step
  double hi[n], lo[n], pp[m];
  for(int col=0;col<m;col++)
  {
    memcpy(pp, p, sizeof(pp));
    pp[col] = p[col] + h;
    lm_callback(pp, hi, m, n, data);
    pp[col] = p[col] - h;
    lm_callback(pp, lo, m, n, data);
    for(int row=0;row<n;row++)
      jac[m*row + col] = (hi[row] - lo[row]) / (2.0*h);
  }
}
// read measured spectral sensitivity samples from "<model>.txt" (spaces in
// the model name become '_').  each valid line holds 4 numbers:
// lambda[nm] r g b; other lines (comments, blanks) are ignored.
// returns the number of rows parsed, 0 if the file cannot be opened.
static inline int
load_reference_cfa_spectra(
    const char *model,
    double cfa_spec[100][4])
{
  char filename[256];
  snprintf(filename, sizeof(filename), "%s.txt", model);
  for(char *c = filename; *c; c++) if(*c == ' ') *c = '_';
  FILE *fr = fopen(filename, "rb");
  if(!fr)
  {
    fprintf(stderr, "[vkdt-mkidt] can't open reference response curves! `%s'\n", filename);
    return 0;
  }
  // fix: the previous fscanf-based loop used the format "*[^\n]" — the '%'
  // was missing, so the comment-skip directive never matched and non-data
  // lines were consumed one byte at a time; it also never bounded the row
  // counter, overrunning cfa_spec[100] for long files.  parse line by line:
  int cnt = 0;
  char line[512];
  while(cnt < 100 && fgets(line, sizeof(line), fr))
  {
    if(4 == sscanf(line, "%lg %lg %lg %lg",
          cfa_spec[cnt] + 0, cfa_spec[cnt] + 1,
          cfa_spec[cnt] + 2, cfa_spec[cnt] + 3))
      cnt++;
  }
  fclose(fr);
  return cnt;
}
// print xyz and rgb pairs for vector plots:
// for 2000 random in-gamut srgb colours, writes one row comparing the
// matrix-based and spectral camera responses (fitted and measured reference)
// plus the runtime lut round-trip, all reduced to chromaticities.
static inline void
write_sample_points(
    const char *basename,         // output goes to "<basename>_points.dat"
    const double *cfa,            // fitted cfa coefficients, 3*num_coeff
    const float *spectra,         // spectral upsampling table
    const header_t *sh,           // header of the spectra table
    const double (*cfa_spec)[4],  // measured reference ssf samples
    const int cfa_spec_cnt,       // number of reference samples
    const double *xyz_to_cam,     // 3x3 xyz -> camera rgb matrix
    const double *cam_to_xyz,     // its inverse
    float *chroma,                // chroma lut (see create_chroma_lut)
    const int cwd,                // chroma lut width
    const int cht)                // chroma lut height
{
  char filename[256] = {0};
  snprintf(filename, sizeof(filename), "%s_points.dat", basename);
  FILE *f0 = fopen(filename, "wb");
  if(!f0) return;
  for(int i=0;i<2000;i++)
  { // pick a random srgb colour inside gamut
    double rgb[3] = {0.0}, xyz[3] = {0.0}, xyz2[3], xyz3[3], cf[3], cam_rgb[3], cam_rgb_spec[3], cam_rgb_rspec[3];
    sample_rgb(rgb, 0.9, 0);
    mat3_mulv(srgb_to_xyz, rgb, xyz);
    mat3_mulv(xyz_to_cam, xyz, cam_rgb); // convert to camera by matrix
    normalise1(xyz); // convert to chromaticity
    fetch_coeff(xyz, spectra, sh->wd, sh->ht, cf);
    if(cf[0] == 0.0) continue; // discard out of spectral locus
    for(int k=0;k<3;k++) // camera rgb by processing spectrum * cfa spectrum
      cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
    mat3_mulv(cam_to_xyz, cam_rgb_spec, xyz2); // spectral camera to xyz via matrix
    for(int k=0;k<3;k++) // also compute a reference
      cam_rgb_rspec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
    mat3_mulv(cam_to_xyz, cam_rgb_rspec, xyz3);
    double rec2020_from_mat[3] = {0.0};
    mat3_mulv(xyz_to_rec2020, xyz2, rec2020_from_mat);
    // reduce everything to chromaticities for the plot:
    normalise1(cam_rgb);
    normalise1(cam_rgb_spec);
    normalise1(cam_rgb_rspec);
    normalise1(xyz2);
    normalise1(xyz3);
    // also write exactly the thing we'll do runtime: camera rgb -> matrix and camera rgb -> lut
    double tc[2] = {cam_rgb_spec[0], cam_rgb_spec[2]}; // normalised already; (r,b) parametrise the lut
    double rec2020[4] = {0.0}, xyz_spec[3] = {0.0};
    fetch_coeff(tc, chroma, cwd, cht, rec2020); // fetch rb
    rec2020[2] = rec2020[1]; // convert to rgb
    rec2020[1] = 1.0-rec2020[0]-rec2020[2];
    mat3_mulv(rec2020_to_xyz, rec2020, xyz_spec);
    normalise1(rec2020_from_mat);
    normalise1(rec2020);
    normalise1(xyz_spec);
    fprintf(f0, "%g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g %g\n",
        xyz[0], xyz[1], xyz2[0], xyz2[1], xyz3[0], xyz3[1],
        cam_rgb[0], cam_rgb[1], cam_rgb[2],
        cam_rgb_spec[0], cam_rgb_spec[1], cam_rgb_spec[2],
        cam_rgb_rspec[0], cam_rgb_rspec[1], cam_rgb_rspec[2],
        xyz_spec[0], xyz_spec[1], xyz2[0], xyz2[1]);
  }
  fclose(f0);
}
#if 0 // disabled debug helper, see commented-out call site in main()
// writes an identity chroma lut: every texel stores its own (r,b) quad
// coordinate mapped back through quad2tri, so a lookup returns the input
// chromaticity unchanged.
static inline void
write_identity_lut(
    int wd,  // lut width
    int ht)  // lut height
{
  header_t hout = {
    .magic = 1234,
    .version = 2,
    .channels = 2,
    .datatype = 0,
    .wd = wd,
    .ht = ht,
  };
  FILE *f = fopen("id.lut", "wb");
  fwrite(&hout, sizeof(hout), 1, f);
  uint16_t *b16 = calloc(sizeof(uint16_t), wd*ht*2);
  for(int j=0;j<ht;j++)
    for(int i=0;i<wd;i++)
    {
      const int k=j*wd+i;
      double rb[2] = {(i+0.5)/wd, (j+0.5)/ht};
      quad2tri(rb, rb+1);
      b16[2*k+0] = float_to_half(rb[0]);
      b16[2*k+1] = float_to_half(rb[1]);
    }
  fwrite(b16, sizeof(uint16_t), wd*ht*2, f);
  fclose(f);
  free(b16);
}
#endif
// create 2.5D chroma lut
// builds the camera-rgb -> rec2020 chromaticity table: for every sampled xy
// chromaticity, compute the camera response of its upsampled spectrum and
// scatter the rec2020 chromaticity (plus a relative brightness norm) into the
// texel addressed by the camera (r,b) quad coordinate.  a first pass collects
// per-angle saturation bounds used to reject inconsistent samples in pass 2.
static inline float*
create_chroma_lut(
    int *wd_out,                  // output: lut width
    int *ht_out,                  // output: lut height
    const float *spectra,         // spectral upsampling table
    const header_t *sh,           // its header
    const double *cfa,            // fitted cfa coefficients, 3*num_coeff
    const double (*cfa_spec)[4],  // measured reference ssf samples
    const int cfa_spec_cnt,       // number of reference samples
    const double *xyz_to_cam,     // 3x3 xyz -> camera rgb matrix
    const double *cam_to_xyz)     // its inverse (unused here)
{
  const int ssf = 0; // DEBUG output lut based on measured curve
  // to avoid interpolation artifacts we only want to place straight pixel
  // center values of our spectra.lut in the output:
  int swd = sh->wd, sht = sh->ht; // sampling dimensions
  int wd = swd, ht = sht;         // output dimensions
  float *buf = calloc(sizeof(float)*4, wd*ht+1); // NOTE(review): +1 texel slack — presumably guards the bilinear border case; confirm
  // do two passes over the data
  // get illum E white point (lowest saturation) in camera rgb and quad param:
  const double white_xyz[3] = {1.0f/3.0f, 1.0f/3.0f, 1.0f/3.0f};
  double white_cam_rgb[3];
  mat3_mulv(xyz_to_cam, white_xyz, white_cam_rgb);
  normalise1(white_cam_rgb); // should not be needed
  // writes quad coords into components 0 and 2 (r and b), matching u0/u1 below:
  tri2quad(white_cam_rgb, white_cam_rgb+2);
  // first pass: get rough idea about max deviation from white and the saturation we got there
  double *angular_ds = calloc(sizeof(double), 360*2); // per degree: (max dist^2, sat there)
  int sample_wd = swd, sample_ht = sht;
  for(int j=0;j<sample_ht;j++) for(int i=0;i<sample_wd;i++)
  {
    double xy[2] = {(i+0.5)/sample_wd, (j+0.5)/sample_ht};
    quad2tri(xy+0, xy+1);
    double cf[3]; // look up the coeffs for the sampled colour spectrum
    fetch_coeffi(xy, spectra, sh->wd, sh->ht, cf); // nearest
    if(cf[0] == 0) continue; // discard out of spectral locus
    double cam_rgb_spec[3] = {0.0}; // camera rgb by processing spectrum * cfa spectrum
    for(int k=0;k<3;k++)
      if(ssf) cam_rgb_spec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
      else    cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
    normalise1(cam_rgb_spec);
    double u0 = cam_rgb_spec[0], u1 = cam_rgb_spec[2]; // (r,b) chroma coords
    tri2quad(&u0, &u1);
    float fxy[] = {xy[0], xy[1]}, white[] = {1.0f/3.0f, 1.0f/3.0f};
    float sat = dt_spectrum_saturation(fxy, white);
    // find angular max dist + sat
    int bin = CLAMP(180.0/M_PI * (M_PI + atan2(u1-white_cam_rgb[2], u0-white_cam_rgb[0])), 0, 359);
    double dist2 =
      (u1-white_cam_rgb[2])*(u1-white_cam_rgb[2])+
      (u0-white_cam_rgb[0])*(u0-white_cam_rgb[0]);
    if(dist2 > angular_ds[2*bin])
    {
      angular_ds[2*bin+0] = dist2;
      angular_ds[2*bin+1] = sat;
    }
  }
  double white_norm = 1.0;
  { // brightness of an (approximately) flat spectrum, used to normalise texel norms
    double coeff[3] = {0.0};
    coeff[2] = 100000.0; // large constant term -> sigmoid saturates to 1
    double white_cam_rgb[3] = {0.0}; // shadows the outer white point on purpose
    white_cam_rgb[0] = eval(cfa+num_coeff*0, coeff, num_coeff, 3);
    white_cam_rgb[1] = eval(cfa+num_coeff*1, coeff, num_coeff, 3);
    white_cam_rgb[2] = eval(cfa+num_coeff*2, coeff, num_coeff, 3);
    white_norm = normalise1(white_cam_rgb);
  }
  // 2nd pass:
  // #pragma omp parallel for schedule(dynamic) collapse(2) default(shared)
  for(int j=0;j<sht;j++) for(int i=0;i<swd;i++)
  {
    double xy[2] = {(i+0.5)/swd, (j+0.5)/sht};
    quad2tri(xy+0, xy+1);
    const double xyz[3] = {xy[0], xy[1], 1.0-xy[0]-xy[1]};
    double cf[3]; // look up the coeffs for the sampled colour spectrum
    fetch_coeff(xy, spectra, sh->wd, sh->ht, cf); // interpolate
    // fetch_coeffi(xy, spectra, sh->wd, sh->ht, cf); // nearest
    if(cf[0] == 0) continue; // discard out of spectral locus
    double cam_rgb_spec[3] = {0.0}; // camera rgb by processing spectrum * cfa spectrum
    for(int k=0;k<3;k++)
      if(ssf) cam_rgb_spec[k] = eval_ref(cfa_spec, k, cfa_spec_cnt, cf);
      else    cam_rgb_spec[k] = eval(cfa+num_coeff*k, cf, num_coeff, 3);
    double norm = normalise1(cam_rgb_spec);
    float fxy[] = {xy[0], xy[1]}, white[2] = {1.0f/3.0f, 1.0f/3.0f};
    float sat = dt_spectrum_saturation(fxy, white);
    // convert tri t to quad u:
    double u0 = cam_rgb_spec[0], u1 = cam_rgb_spec[2];
    tri2quad(&u0, &u1);
    int bin = CLAMP(180.0/M_PI * (M_PI + atan2(u1-white_cam_rgb[2], u0-white_cam_rgb[0])), 0, 359);
    double dist2 =
      (u1-white_cam_rgb[2])*(u1-white_cam_rgb[2])+
      (u0-white_cam_rgb[0])*(u0-white_cam_rgb[0]);
    if(dist2 < angular_ds[2*bin] && sat > angular_ds[2*bin+1])
      continue; // discard higher xy sat for lower rgb sat
    if(dist2 < 0.8*0.8*angular_ds[2*bin] && sat > 0.95*angular_ds[2*bin+1])
      continue; // be harsh to values straddling our bounds
    // sort this into rb/sum(rgb) map in camera rgb
    int ii = CLAMP(u0 * wd + 0.5, 0, wd-1);
    int jj = CLAMP(u1 * ht + 0.5, 0, ht-1);
    double rec2020[3];
    mat3_mulv(xyz_to_rec2020, xyz, rec2020);
    normalise1(rec2020);
    buf[4*(jj*wd + ii)+0] = rec2020[0];
    buf[4*(jj*wd + ii)+1] = rec2020[2];
    buf[4*(jj*wd + ii)+2] = norm/white_norm; // store relative norm too!
  }
  free(angular_ds);
  *wd_out = wd;
  *ht_out = ht;
  return buf;
}
// write look up table based on hue and chroma:
// stores buf both as a debug .pfm (with g reconstructed as 1-r-b) and as the
// runtime half-float 2-channel .lut file.
static inline void
write_chroma_lut(
    const char *basename,  // output base name, gets ".pfm" / ".lut" appended
    const float *buf,      // wd*ht*4 floats; channels 0,1 hold r and b chroma
    const int wd,
    const int ht)
{
  char filename[256] = {0};
  snprintf(filename, sizeof(filename), "%s.pfm", basename);
  FILE *f = fopen(filename, "wb");
  if(!f)
  { // fix: fopen results were unchecked — an unwritable directory crashed
    // on fprintf(NULL, ..) instead of reporting the problem
    fprintf(stderr, "[vkdt-mkidt] can't write `%s'\n", filename);
    return;
  }
  fprintf(f, "PF\n%d %d\n-1.0\n", wd, ht);
  for(int k=0;k<wd*ht;k++)
  {
    float col[3] = {buf[4*k], buf[4*k+1], 1.0-buf[4*k]-buf[4*k+1]};
    fwrite(col, sizeof(float), 3, f);
  }
  fclose(f);
  header_t hout = {
    .magic = 1234,
    .version = 2,
    .channels = 2,
    .datatype = 0,
    .wd = wd,
    .ht = ht,
  };
  snprintf(filename, sizeof(filename), "%s.lut", basename);
  f = fopen(filename, "wb");
  if(!f)
  {
    fprintf(stderr, "[vkdt-mkidt] can't write `%s'\n", filename);
    return;
  }
  uint16_t *b16 = calloc(sizeof(uint16_t), wd*ht*2);
  if(!b16) // fix: calloc was unchecked before the loop wrote through it
  {
    fclose(f);
    return;
  }
  fwrite(&hout, sizeof(hout), 1, f);
  for(int k=0;k<wd*ht;k++)
  {
    b16[2*k+0] = float_to_half(buf[4*k+0]);
    b16[2*k+1] = float_to_half(buf[4*k+1]);
  }
  fwrite(b16, sizeof(uint16_t), wd*ht*2, f);
  fclose(f);
  free(b16);
}
static inline void
print_cfa_coeffs(double *cfa)
{ // dump the fitted polynomial coefficients of all three channels to stderr
  const char *channel[3] = {"red", "green", "blue"};
  for(int c=0;c<3;c++)
  {
    fprintf(stderr, "%s[vkdt-mkidt] %s cfa coeffs ", c ? "\n" : "", channel[c]);
    for(int k=0;k<num_coeff;k++)
      fprintf(stderr, "%2.8g ", cfa[c*num_coeff+k]);
  }
  fprintf(stderr, "\n");
}
// write the fitted cfa response curves, one row per wavelength: lambda r g b
static inline void
write_camera_curves(
    const char *basename,  // output goes to "<basename>_curves.dat"
    const double *cfa)     // fitted cfa coefficients, 3*num_coeff
{ // plot 'dat' u 1:2 w l lw 4, '' u 1:3 w l lw 4, '' u 1:4 w l lw 4
  char filename[256] = {0};
  snprintf(filename, sizeof(filename), "%s_curves.dat", basename);
  FILE *f = fopen(filename, "wb");
  if(!f) return;
  // fix: the loop bound used CIE_SAMPLES but the normalisation used
  // CIE_FINE_SAMPLES.  both happen to be 95 today, so output is unchanged,
  // but the curve would silently stop short (or overshoot the visible range)
  // if either constant were changed.  use one constant consistently:
  for(int i=0;i<CIE_SAMPLES;i++)
  { // plot the camera curves:
    double lambda0 = (i+.5)/(double)CIE_SAMPLES;
    double lambda1 = lambda0 * (CIE_LAMBDA_MAX - CIE_LAMBDA_MIN) + CIE_LAMBDA_MIN;
    double s0 = sigmoid(poly(cfa+0*num_coeff, lambda0, num_coeff));
    double s1 = sigmoid(poly(cfa+1*num_coeff, lambda0, num_coeff));
    double s2 = sigmoid(poly(cfa+2*num_coeff, lambda0, num_coeff));
    fprintf(f, "%g %g %g %g\n", lambda1, s0, s1, s2);
  }
  fclose(f);
}
// entry point: fit smooth cfa spectra so that spectrum->camera-rgb matches the
// camera's adobe xyz->camera matrix on random in-gamut colours, then bake and
// write the camera-rgb -> rec2020 chroma lut plus several debug outputs.
int main(int argc, char *argv[])
{
  // warm up random number generator
  for(int k=0;k<10;k++) xrand();
  // load spectra.lut:
  header_t header;
  float *spectra = 0;
  {
    FILE *f = fopen("spectra.lut", "rb");
    if(!f) goto error;
    if(fread(&header, sizeof(header_t), 1, f) != 1) goto error;
    if(header.channels != 4) goto error;
    if(header.version != 2) goto error;
    spectra = calloc(4*sizeof(float), header.wd * header.ht);
    // NOTE(review): size/nmemb arguments look swapped vs the fread above
    // (same byte count, but the unchecked return value counts differently)
    fread(spectra, header.wd*header.ht, 4*sizeof(float), f);
    fclose(f);
    if(0)
    { // shared error exit for the loads above; f is assigned before any goto
error:
      if(f) fclose(f);
      fprintf(stderr, "[vkdt-mkidt] could not read spectra.lut!\n");
      exit(2);
    }
  }
  // these influence the problem size and the mode of operation of the fitter:
  // low quality mode:
  num_coeff = 6;      // number of coefficients in the sigmoid/polynomial cfa spectra
  int num_it = 500;   // iterations per batch
  int num = 10;       // number of data points (spectrum/xy) per batch
  int batch_cnt = 50; // number of batches
  // initial thoughts: using 100 batches and 1000 iterations (num=50)
  // improves the fit a bit, only not in the reds. probably not
  // converged yet, but indeed it seems more data serves the process.
  int der = 0, hq = 0;
  const char *model = "Canon EOS 5D Mark II";
  for(int k=1;k<argc;k++)
  { // poor man's argument parsing: last non-flag argument is the model name
    if     (!strcmp(argv[k], "--hq" )) hq  = 1; // high quality mode
    else if(!strcmp(argv[k], "--der")) der = 1; // use analytic jacobian
    else model = argv[k];
  }
  if(hq && !der)
  {
    fprintf(stderr, "[vkdt-mkidt] setting high quality mode, this can be slow..\n");
    num_coeff = 6;
    num_it = 1000; // 100k unfortunately it does improve from 10k, not by much but notably. this is 10x the cost :(
    batch_cnt = 100;
    num = 70;
  }
  else if(hq && der)
  { // these need way more iterations because they often bail out early
    fprintf(stderr, "[vkdt-mkidt] setting high quality mode and analytic jacobian. this can be slow..\n");
    num_coeff = 6;
    num_it = 1000;
    batch_cnt = 100;
    num = 70;
  }
  double xyz_to_cam[9];
  float adobe_mat[12] = {1, 0, 0, 0, 1, 0, 0, 0, 1}; // identity fallback
  fprintf(stderr, "[vkdt-mkidt] using matrix for `%s'\n", model);
  if(strcmp(model, "identity") && dt_dcraw_adobe_coeff(model, &adobe_mat))
  {
    fprintf(stderr, "[vkdt-mkidt] could not find this camera model (%s)! check your spelling?\n", model);
    exit(3);
  }
  for(int i=0;i<9;i++) xyz_to_cam[i] = adobe_mat[i];
  double cam_to_xyz[9] = {0.0};
  mat3_inv(xyz_to_cam, cam_to_xyz);
  // xyz -> camera rgb matrix. does it contain white balancing stuff?
  fprintf(stderr, "[vkdt-mkidt] M = %g %g %g\n"
                  "                 %g %g %g\n"
                  "                 %g %g %g\n",
      xyz_to_cam[0], xyz_to_cam[1], xyz_to_cam[2],
      xyz_to_cam[3], xyz_to_cam[4], xyz_to_cam[5],
      xyz_to_cam[6], xyz_to_cam[7], xyz_to_cam[8]);
  // in particular, xyz white 1 1 1 maps to this camera rgb:
  double white[3] = {0.0}, one[3] = {1.0, 1.0, 1.0};
  mat3_mulv(xyz_to_cam, one, white);
  double wbcoeff[3] = {1.0/white[0], 1.0/white[1], 1.0/white[2]};
  fprintf(stderr, "[vkdt-mkidt] white = %g %g %g\n", white[0], white[1], white[2]);
  fprintf(stderr, "[vkdt-mkidt] wb coeff = %g %g %g\n", wbcoeff[0], wbcoeff[1], wbcoeff[2]);
  // init initial cfa params and lower/upper bounds:
  double cfa[30] = {0.0}; // 30 = max 3 * num_coeff (num_coeff <= 10)
  double lb[30] = {0.0};
  double ub[30] = {0.0};
  // these bounds are important for regularisation. else we'll fit to box spectra to be
  // able to match the matrix better. +-50 seem to work really well in most cases.
  // fuji/nikon sometimes asks for extended range, but i think it would be better to increase
  // the number of coefficients/degree of the polynomial instead.
  for(int k=0;k<3*num_coeff;k++) lb[k] = -50;
  for(int k=0;k<3*num_coeff;k++) ub[k] = 50;
  // for(int k=0;k<3*num_coeff;k++) lb[k] = -(ub[k] = 500); // for kodak need more complex blue
  int e = num_coeff-3; // quadratic term
  // start from a downward parabola so the initial curves are smooth bumps:
  cfa[e+0] = cfa[e+num_coeff] = cfa[e+2*num_coeff] = -3.0;
  // construct data point arrays for levmar, 3x for each tristimulus/camera rgb channel:
  double *data = calloc(num, sizeof(double) * 3);   // data point: spectral coeff for input
  double *target = calloc(num, sizeof(double) * 2); // target chroma
  fprintf(stderr, "[vkdt-mkidt] starting optimiser..");
  double resid = 1.0;
  // run optimisation in mini-batches
  for (int batch=0;batch<batch_cnt;batch++)
  {
    for(int i=0;i<num;i++)
    {
      // pick a random srgb colour inside gamut
      double rgb[3] = {0.0}, xyz[3] = {0.0};
      sample_rgb(rgb, 0.17, 1); // chosen to reach all of srgb at least potentially
      // sample_rgb(rgb, 0.3, 1); // relaxed for kodak
      mat3_mulv(srgb_to_xyz, rgb, xyz);
      normalise1(xyz);
      double cf[3]; // look up the coeffs for the sampled colour spectrum
      fetch_coeff(xyz, spectra, header.wd, header.ht, cf);
      // apply xyz to camera rgb matrix:
      double cam_rgb[3] = {0.0};
      mat3_mulv(xyz_to_cam, xyz, cam_rgb);
      normalise1(cam_rgb);
      memcpy(target+2*i, cam_rgb, sizeof(double)*2); // only (r,g), b is implied
      memcpy(data+3*i, cf, sizeof(double)*3);
    }
#ifdef USE_LEVMAR
    double info[LM_INFO_SZ] = {0};
    double opts[LM_OPTS_SZ] = {
      // init-mu  eps Jte    eps Dp     eps err    eps diff
      0.2,        1E-15,     1E-40,     1E-15,     1e-5};//LM_DIFF_DELTA};
    if(der)
    { // box-constrained levmar with the analytic jacobian
      dlevmar_bc_der(
          lm_callback, lm_jacobian, cfa, target, 3*num_coeff, 2*num,
          lb, ub, NULL, // dscl, // diagonal scaling constants (?)
          num_it, opts, info, NULL, NULL, data);
    }
    else
    { // box-constrained levmar with finite differences
      dlevmar_bc_dif(
          lm_callback, cfa, target, 3*num_coeff, 2*num,
          lb, ub, NULL, // dscl, // diagonal scaling constants (?)
          num_it, opts, info, NULL, NULL, data);
    }
    // fprintf(stderr, " ||e||_2, ||J^T e||_inf, ||Dp||_2, mu/max[J^T J]_ii\n");
    // fprintf(stderr, "info %g %g %g %g\n", info[1], info[2], info[3], info[4]);
    fprintf(stderr, "\r[vkdt-mkidt] batch %d/%d it %04d/%d reason %g resid %g -> %g    ",
        batch+1, batch_cnt, (int)info[5], num_it, info[6], info[0], info[1]);
    fprintf(stdout, "%g %g\n", info[0], info[1]);
#else
    fprintf(stdout, "%g ", resid);
    resid = dt_gauss_newton_cg(
        lm_callback,
        // lm_jacobian, // does not like our analytic jacobian
        lm_jacobian_dif,
        cfa, target, 3*num_coeff, 2*num,
        lb, ub, num_it, data);
    fprintf(stderr, "\r[vkdt-mkidt] batch %d/%d resid %g   ",
        batch+1, batch_cnt, resid);
    // fprintf(stdout, "%g\n", resid); // write convergence history
#endif
  } // end mini batches
  fprintf(stderr, "\n");
  // now we're done, prepare the data for some useful output:
  // load reference spectra from txt, if we can
  double cfa_spec[100][4] = {{0.0}};
  const int cfa_spec_cnt = load_reference_cfa_spectra(model, cfa_spec);
  // create the actual 2D chroma lut
  int wd, ht;
  float *buf = create_chroma_lut(&wd, &ht, spectra, &header, cfa, cfa_spec, cfa_spec_cnt, xyz_to_cam, cam_to_xyz);
#if 1 // then hole fill it
  dt_inpaint_buf_t inpaint_buf = {
    .dat = buf,
    .wd  = wd,
    .ht  = ht,
    .cpp = 4,
  };
  dt_inpaint(&inpaint_buf);
#endif
  char basename[256] = {0}; // get sanitised basename
  snprintf(basename, sizeof(basename), "%s", model);
  int len = strnlen(basename, sizeof(basename));
  // NOTE(review): reads basename+len-4 — underflows for model names shorter
  // than 4 characters; confirm whether such input can reach this point
  if(!strcasecmp(".txt", basename + len - 4))
    basename[len-4] = 0; // strip a trailing ".txt" from the model name
  for(int i=0;i<len;i++) if(basename[i] == ' ') basename[i] = '_';
  // write the chroma lut to half float .lut as well as .pfm for debugging:
  write_chroma_lut(basename, buf, wd, ht);
  // write a couple of sample points for debug vector plots
  write_sample_points(basename, cfa, spectra, &header, cfa_spec, cfa_spec_cnt, xyz_to_cam, cam_to_xyz, buf, wd, ht);
  // output the coefficients to console
  print_cfa_coeffs(cfa);
  // write the cfa curves to a file for plotting
  write_camera_curves(basename, cfa);
  // write a camera rgb == rec2020 identity lut for debugging
  // write_identity_lut(wd, ht);
  free(buf);
  exit(0);
}
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
class OpenMPIRBuilder;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// Hook pair invoked around emission of an OpenMP region during advanced
/// codegen sequences. Subclasses override Enter/Exit as needed; the default
/// hooks do nothing.
class PrePostActionTy {
public:
  explicit PrePostActionTy() {}
  /// Called before the region body is emitted.
  virtual void Enter(CodeGenFunction &CGF) {}
  /// Called after the region body has been emitted.
  virtual void Exit(CodeGenFunction &CGF) {}
  virtual ~PrePostActionTy() = default;
};
/// Class provides a way to call simple version of codegen for OpenMP region, or
/// an advanced with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
  // Type-erased pointer to the caller-supplied callable, stored as an integer
  // so the class itself needs no template parameter; CallbackFn restores the
  // concrete type before invoking it.
  intptr_t CodeGen;
  typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
  CodeGenTy Callback;
  // Optional action whose hooks surround the region codegen; mutable so it can
  // be attached through a const reference (see setAction).
  mutable PrePostActionTy *PrePostAction;
  RegionCodeGenTy() = delete;
  // Trampoline: casts the erased pointer back to the original callable type
  // and invokes it.
  template <typename Callable>
  static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
                         PrePostActionTy &Action) {
    return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
  }

public:
  // Constructor is disabled for RegionCodeGenTy itself via enable_if so this
  // does not hijack the copy constructor.
  // NOTE(review): the address of the (possibly temporary) callable is stored;
  // the callable must outlive this object.
  template <typename Callable>
  RegionCodeGenTy(
      Callable &&CodeGen,
      std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>,
                                     RegionCodeGenTy>::value> * = nullptr)
      : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
        Callback(CallbackFn<std::remove_reference_t<Callable>>),
        PrePostAction(nullptr) {}
  /// Attaches \p Action so its Enter/Exit hooks run around the region codegen.
  void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
  /// Emits the region by invoking the stored callable (defined out of line).
  void operator()(CodeGenFunction &CGF) const;
};
/// Bundle of expressions and flags gathered from the data-sharing and
/// task-related clauses of a directive, passed to the task codegen helpers.
struct OMPTaskDataTy final {
  // Expressions from privatization clauses (variable, its private copy, and
  // for firstprivate/lastprivate the associated init/copy helpers).
  SmallVector<const Expr *, 4> PrivateVars;
  SmallVector<const Expr *, 4> PrivateCopies;
  SmallVector<const Expr *, 4> FirstprivateVars;
  SmallVector<const Expr *, 4> FirstprivateCopies;
  SmallVector<const Expr *, 4> FirstprivateInits;
  SmallVector<const Expr *, 4> LastprivateVars;
  SmallVector<const Expr *, 4> LastprivateCopies;
  // Expressions from reduction clauses (shared item, original item, private
  // copy, and the reduction operation helper).
  SmallVector<const Expr *, 4> ReductionVars;
  SmallVector<const Expr *, 4> ReductionOrigs;
  SmallVector<const Expr *, 4> ReductionCopies;
  SmallVector<const Expr *, 4> ReductionOps;
  SmallVector<CanonicalDeclPtr<const VarDecl>, 4> PrivateLocals;
  /// One `depend` clause entry: its kind, optional iterator, and the list of
  /// dependency expressions.
  struct DependData {
    OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown;
    const Expr *IteratorExpr = nullptr;
    SmallVector<const Expr *, 4> DepExprs;
    explicit DependData() = default;
    DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr)
        : DepKind(DepKind), IteratorExpr(IteratorExpr) {}
  };
  SmallVector<DependData, 4> Dependences;
  // Clause value paired with a flag bit. NOTE(review): the bool presumably
  // records whether the clause was specified — confirm in CGOpenMPRuntime.cpp.
  llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
  llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
  llvm::Value *Reductions = nullptr;
  unsigned NumberOfParts = 0;
  // Task flags; `tied` is the OpenMP default.
  bool Tied = true;
  bool Nogroup = false;
  bool IsReductionWithTaskMod = false;
  bool IsWorksharingReduction = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
  /// Data required for codegen of reduction clauses.
  struct ReductionData {
    /// Reference to the item shared between tasks to reduce into.
    const Expr *Shared = nullptr;
    /// Reference to the original item.
    const Expr *Ref = nullptr;
    /// Helper expression for generation of private copy.
    const Expr *Private = nullptr;
    /// Helper expression for generation of the reduction operation.
    const Expr *ReductionOp = nullptr;
    ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private,
                  const Expr *ReductionOp)
        : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) {
    }
  };
  /// List of reduction-based clauses.
  SmallVector<ReductionData, 4> ClausesData;
  /// List of addresses of shared variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
  /// List of addresses of original variables/expressions.
  SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses;
  /// Sizes of the reduction items in chars.
  SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
  /// Base declarations for the reduction items.
  SmallVector<const VarDecl *, 4> BaseDecls;
  /// Emits lvalue for shared expression.
  LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
  /// Emits upper bound for shared expression (if array section).
  LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
  /// Performs aggregate initialization.
  /// \param N Number of reduction item in the common list.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param SharedLVal Address of the original shared variable.
  /// \param DRD Declare reduction construct used for reduction item.
  void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
                                   Address PrivateAddr, LValue SharedLVal,
                                   const OMPDeclareReductionDecl *DRD);

public:
  /// The four arrays are parallel: entry \a N of each describes reduction
  /// item \a N (see ReductionData for the meaning of each expression).
  ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs,
                   ArrayRef<const Expr *> Privates,
                   ArrayRef<const Expr *> ReductionOps);
  /// Emits lvalue for the shared and original reduction item.
  /// \param N Number of the reduction item.
  void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N);
  /// Emits the code for the variable-modified type, if required.
  /// \param N Number of the reduction item.
  /// \param Size Size of the type in chars.
  void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
  /// Performs initialization of the private copy for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  /// \param DefaultInit Default initialization sequence that should be
  /// performed if no reduction specific initialization is found.
  /// \param SharedLVal Address of the original shared variable.
  void
  emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
                     LValue SharedLVal,
                     llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
  /// Returns true if the private copy requires cleanups.
  bool needCleanups(unsigned N);
  /// Emits cleanup code for the reduction item.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
  /// Adjusts \p PrivateAddr for using instead of the original variable
  /// address in normal operations.
  /// \param N Number of the reduction item.
  /// \param PrivateAddr Address of the corresponding private item.
  Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
                               Address PrivateAddr);
  /// Returns LValue for the reduction item.
  LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
  /// Returns LValue for the original reduction item.
  LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; }
  /// Returns the size of the reduction item (in chars and total number of
  /// elements in the item), or nullptr, if the size is a constant.
  std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
    return Sizes[N];
  }
  /// Returns the base declaration of the reduction item.
  const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
  /// Returns the reference expression for the original reduction item.
  const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
  /// Returns true if the initialization of the reduction item uses initializer
  /// from declare reduction construct.
  bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
/// RAII helper that allows to disable automatic handling of functions used in
/// target regions as those marked as `omp declare target`.
class DisableAutoDeclareTargetRAII {
  CodeGenModule &CGM;
  /// Saved value of the runtime's ShouldMarkAsGlobal flag; presumably restored
  /// by the destructor (both members are defined out of line — confirm in the
  /// .cpp).
  bool SavedShouldMarkAsGlobal;

public:
  DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
  ~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
  CodeGenModule &CGM;
  /// Whether the constructor pushed an entry (onto NontemporalDeclsStack,
  /// declared later in this class) that the destructor must undo — inferred
  /// from the name; confirm in the .cpp.
  const bool NeedToPush;

public:
  NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
  ~NontemporalDeclsRAII();
};
/// Manages the list of local variables of an untied task for the specified
/// directive. (The previous comment here was copy-pasted from
/// NontemporalDeclsRAII.)
class UntiedTaskLocalDeclsRAII {
  CodeGenModule &CGM;
  /// Whether the constructor pushed state that the destructor must undo —
  /// inferred from the name; confirm in the .cpp.
  const bool NeedToPush;

public:
  /// \param LocalVars Map from each untied-task local variable to a pair of
  /// addresses (roles defined in the .cpp).
  UntiedTaskLocalDeclsRAII(
      CodeGenFunction &CGF,
      const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
                            std::pair<Address, Address>> &LocalVars);
  ~UntiedTaskLocalDeclsRAII();
};
/// Maps the expression for the lastprivate variable to the global copy used
/// to store new value because original variables are not mapped in inner
/// parallel regions. Only private copies are captured but we need also to
/// store private copy in shared address.
/// Also, stores the expression for the private loop counter and its
/// threadprivate name.
struct LastprivateConditionalData {
  /// Map from a canonical declaration to the unique name of its global copy.
  llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>>
      DeclToUniqueName;
  /// LValue of the (private) loop counter.
  LValue IVLVal;
  llvm::Function *Fn = nullptr;
  /// True when lastprivate-conditional handling is disabled for this entry
  /// (see LastprivateConditionalRAII::disable).
  bool Disabled = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
  /// What the destructor/constructor pair has to do for this directive.
  enum class ActionToDo {
    DoNotPush,
    PushAsLastprivateConditional,
    DisableLastprivateConditional,
  };
  CodeGenModule &CGM;
  ActionToDo Action = ActionToDo::DoNotPush;
  /// Check and try to disable analysis of inner regions for changes in
  /// lastprivate conditional.
  void tryToDisableInnerAnalysis(const OMPExecutableDirective &S,
                                 llvm::DenseSet<CanonicalDeclPtr<const Decl>>
                                     &NeedToAddForLPCsAsDisabled) const;
  /// Private constructor shared by the public constructor and disable().
  LastprivateConditionalRAII(CodeGenFunction &CGF,
                             const OMPExecutableDirective &S);

public:
  explicit LastprivateConditionalRAII(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &S,
                                      LValue IVLVal);
  /// Builds an RAII object that disables lastprivate-conditional handling for
  /// the directive \p S.
  static LastprivateConditionalRAII disable(CodeGenFunction &CGF,
                                            const OMPExecutableDirective &S);
  ~LastprivateConditionalRAII();
};
/// Returns the OpenMP-IR-Builder instance owned by this runtime (OMPBuilder).
llvm::OpenMPIRBuilder &getOMPBuilder() { return OMPBuilder; }
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// An OpenMP-IR-Builder instance.
llvm::OpenMPIRBuilder OMPBuilder;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Emit the number of teams for a target directive. Inspect the num_teams
/// clause associated with a teams construct combined or closely nested
/// with the target directive.
///
/// Emit a team of size one for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
int32_t &DefaultVal);
llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D);
/// Emit the number of threads for a target directive. Inspect the
/// thread_limit clause associated with a teams construct combined or closely
/// nested with the target directive.
///
/// Emit the num_threads clause for directives such as 'target parallel' that
/// have no associated teams construct.
///
/// Otherwise, return nullptr.
const Expr *
getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
int32_t &DefaultVal);
llvm::Value *
emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &D);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
// The name can be customized depending on the target.
//
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;}
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<SourceLocation, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Maps local variables marked as lastprivate conditional to their internal
/// types.
llvm::DenseMap<llvm::Function *,
llvm::DenseMap<CanonicalDeclPtr<const Decl>,
std::tuple<QualType, const FieldDecl *,
const FieldDecl *, LValue>>>
LastprivateConditionalToTypes;
/// Maps function to the position of the untied task locals stack.
llvm::DenseMap<llvm::Function *, unsigned> FunctionToUntiedTaskStackMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// Type typedef struct kmp_task_affinity_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool flag1 : 1;
/// bool flag2 : 1;
/// kmp_int32 reserved : 30;
/// } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it a function).
/// int32_t flags;
/// int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
  /// Kind of a given entry.
  enum OffloadingEntryInfoKinds : unsigned {
    /// Entry is a target region.
    OffloadingEntryInfoTargetRegion = 0,
    /// Entry is a declare target variable.
    OffloadingEntryInfoDeviceGlobalVar = 1,
    /// Invalid entry info.
    OffloadingEntryInfoInvalid = ~0u
  };

protected:
  OffloadEntryInfo() = delete;
  explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
  explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
                            uint32_t Flags)
      : Flags(Flags), Order(Order), Kind(Kind) {}
  // Non-virtual and protected: entries are not deleted polymorphically.
  ~OffloadEntryInfo() = default;

public:
  /// An entry is valid once an emission order has been assigned (Order starts
  /// out as ~0u).
  bool isValid() const { return Order != ~0u; }
  unsigned getOrder() const { return Order; }
  OffloadingEntryInfoKinds getKind() const { return Kind; }
  uint32_t getFlags() const { return Flags; }
  void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
  llvm::Constant *getAddress() const {
    return cast_or_null<llvm::Constant>(Addr);
  }
  /// Sets the entry address; may only be called once while the stored value
  /// is alive (asserted).
  void setAddress(llvm::Constant *V) {
    assert(!Addr.pointsToAliveValue() && "Address has been set before!");
    Addr = V;
  }
  // Root of the classof() hierarchy used by llvm::cast/dyn_cast.
  static bool classof(const OffloadEntryInfo *Info) { return true; }

private:
  /// Address of the entity that has to be mapped for offloading.
  llvm::WeakTrackingVH Addr;
  /// Flags associated with the device global.
  uint32_t Flags = 0u;
  /// Order this entry was emitted.
  unsigned Order = ~0u;
  OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
  /// Address that can be used as the ID of the entry.
  llvm::Constant *ID = nullptr;

public:
  OffloadEntryInfoTargetRegion()
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
  explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                        llvm::Constant *Addr,
                                        llvm::Constant *ID,
                                        OMPTargetRegionEntryKind Flags)
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
        ID(ID) {
    setAddress(Addr);
  }
  llvm::Constant *getID() const { return ID; }
  /// Sets the entry ID; may only be called once (asserted).
  void setID(llvm::Constant *V) {
    assert(!ID && "ID has been set before!");
    ID = V;
  }
  // Supports llvm::cast/dyn_cast from OffloadEntryInfo.
  static bool classof(const OffloadEntryInfo *Info) {
    return Info->getKind() == OffloadingEntryInfoTargetRegion;
  }
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
bool IgnoreAddressId = false) const;
/// \brief Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
/// Mark the entry as a to declare target.
OMPTargetGlobalVarEntryTo = 0x0,
/// Mark the entry as a to declare target link.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
  /// Size of the global variable.
  CharUnits VarSize;
  /// Linkage the variable should be emitted with.
  llvm::GlobalValue::LinkageTypes Linkage;

public:
  OffloadEntryInfoDeviceGlobalVar()
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
  explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
                                           OMPTargetGlobalVarEntryKind Flags)
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
  explicit OffloadEntryInfoDeviceGlobalVar(
      unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
      OMPTargetGlobalVarEntryKind Flags,
      llvm::GlobalValue::LinkageTypes Linkage)
      : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
        VarSize(VarSize), Linkage(Linkage) {
    setAddress(Addr);
  }
  CharUnits getVarSize() const { return VarSize; }
  void setVarSize(CharUnits Size) { VarSize = Size; }
  llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
  void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
  // Supports llvm::cast/dyn_cast from OffloadEntryInfo.
  static bool classof(const OffloadEntryInfo *Info) {
    return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
  }
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
// Storage for target region entries kind. The storage is to be indexed by
// file ID, device ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries kind. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned. Will create a distribute call
/// __kmpc_distribute_static_init* if \a IsGPUDistribute is set.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned,
bool IsGPUDistribute);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, llvm::Value *MapName,
CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
/// Values produced by task-allocation codegen and threaded through the
/// task-emission helpers (see emitTaskInit below).
struct TaskResultTy {
  /// The newly allocated task. NOTE(review): presumably the value returned by
  /// __kmpc_omp_task_alloc — confirm in emitTaskInit's definition.
  llvm::Value *NewTask = nullptr;
  /// Outlined task entry function.
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  /// Base lvalue of the kmp_task_t record.
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  /// Task duplication function, if any.
  llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit update for lastprivate conditional data.
void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal,
StringRef UniqueDeclName, LValue LVal,
SourceLocation Loc);
/// Returns the number of the elements and the address of the depobj
/// dependency array.
/// \return Number of elements in depobj array and the pointer to the array of
/// dependencies.
std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF,
LValue DepobjLVal,
SourceLocation Loc);
public:
  /// Constructs the host OpenMP runtime support for module \p CGM, delegating
  /// to the protected constructor with "." separator components
  /// (NOTE(review): assumed to be name-mangling separators used by getName —
  /// confirm against the delegated-to constructor, declared outside this view).
  explicit CGOpenMPRuntime(CodeGenModule &CGM)
      : CGOpenMPRuntime(CGM, ".", ".") {}
  virtual ~CGOpenMPRuntime() {}
  /// Releases per-module runtime state; definition is out-of-line, so the
  /// exact set of cleared caches is not visible here.
  virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at the compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Get the platform-specific name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Get the function for the specified user-defined mapper. If it does not
/// exist, create one.
llvm::Function *
getOrCreateUserDefinedMapperFunc(const OMPDeclareMapperDecl *D);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
virtual void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen,
SourceLocation Loc,
const Expr *Filter = nullptr);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// struct with the values to be passed to the dispatch runtime function
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
  /// Size of the iteration variable in bits.
  unsigned IVSize = 0;
  /// Sign of the iteration variable.
  bool IVSigned = false;
  /// true if loop is ordered, false otherwise.
  bool Ordered = false;
  /// Address of the output variable in which the flag of the last iteration
  /// is returned.
  Address IL = Address::invalid();
  /// Address of the output variable in which the lower iteration number is
  /// returned.
  Address LB = Address::invalid();
  /// Address of the output variable in which the upper iteration number is
  /// returned.
  Address UB = Address::invalid();
  /// Address of the output variable in which the stride value is returned
  /// necessary to generated the static_chunked scheduled loop.
  Address ST = Address::invalid();
  /// Value of the chunk for the static_chunked scheduled loop. For the
  /// default (nullptr) value, the chunk 1 will be used.
  llvm::Value *Chunk = nullptr;
  /// Initializes every input for the static init runtime call; all output
  /// addresses (\p IL, \p LB, \p UB, \p ST) must be valid, while \p Chunk
  /// defaults to null meaning "chunk of 1".
  StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
                Address LB, Address UB, Address ST,
                llvm::Value *Chunk = nullptr)
      : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
        UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with link
/// clause OR as declare target with to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit a code for initialization of declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
/// Options controlling how emitReduction lowers a reduction clause.
struct ReductionOptionsTy {
  /// true if the parent directive also has a 'nowait' clause
  /// (selects the __kmpc_reduce_nowait runtime entry point).
  bool WithNowait;
  /// Emit the reduction operation only, without runtime calls; used for the
  /// 'omp simd' directive on the host.
  bool SimpleReduction;
  /// Kind of the directive the reduction is emitted for.
  OpenMPDirectiveKind ReductionKind;
};
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used and device modifier.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a GD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// requires directives was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
  /// Set to true if device pointer information have to be obtained.
  bool RequiresDevicePointerInfo = false;
  /// Set to true if Clang emits separate runtime calls for the beginning and
  /// end of the region. These calls might have separate map type arrays.
  bool SeparateBeginEndCalls = false;

public:
  /// The array of base pointers passed to the runtime library.
  llvm::Value *BasePointersArray = nullptr;
  /// The array of section pointers passed to the runtime library.
  llvm::Value *PointersArray = nullptr;
  /// The array of sizes passed to the runtime library.
  llvm::Value *SizesArray = nullptr;
  /// The array of map types passed to the runtime library for the beginning
  /// of the region or for the entire region if there are no separate map
  /// types for the region end.
  llvm::Value *MapTypesArray = nullptr;
  /// The array of map types passed to the runtime library for the end of the
  /// region, or nullptr if there are no separate map types for the region
  /// end.
  llvm::Value *MapTypesArrayEnd = nullptr;
  /// The array of user-defined mappers passed to the runtime library.
  llvm::Value *MappersArray = nullptr;
  /// The array of original declaration names of mapped pointers sent to the
  /// runtime library for debugging.
  llvm::Value *MapNamesArray = nullptr;
  /// Indicate whether any user-defined mapper exists.
  bool HasMapper = false;
  /// The total number of pointers passed to the runtime library.
  unsigned NumberOfPtrs = 0u;
  /// Map between a declaration of a capture and the corresponding base
  /// pointer address where the runtime returns the device pointers.
  llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

  explicit TargetDataInfo() = default;
  explicit TargetDataInfo(bool RequiresDevicePointerInfo,
                          bool SeparateBeginEndCalls)
      : RequiresDevicePointerInfo(RequiresDevicePointerInfo),
        SeparateBeginEndCalls(SeparateBeginEndCalls) {}

  /// Clear information about the data arrays.
  void clearArrayInfo() {
    BasePointersArray = nullptr;
    PointersArray = nullptr;
    SizesArray = nullptr;
    MapTypesArray = nullptr;
    MapTypesArrayEnd = nullptr;
    MapNamesArray = nullptr;
    MappersArray = nullptr;
    HasMapper = false;
    NumberOfPtrs = 0u;
  }
  /// Return true if the current target data information has valid arrays.
  bool isValid() const {
    return BasePointersArray && PointersArray && SizesArray &&
           MapTypesArray && (!HasMapper || MappersArray) && NumberOfPtrs;
  }
  bool requiresDevicePointerInfo() const { return RequiresDevicePointerInfo; }
  bool separateBeginEndCalls() const { return SeparateBeginEndCalls; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
/// \return The parameter to use in the outlined function; the base-class
/// implementation performs no translation and simply returns \p NativeParam.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
/// The base-class implementation is a no-op: \p ScheduleKind and \p Chunk are
/// left untouched. Target-specific runtimes override this to supply defaults.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true, if it was marked already, and false, otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform check on requires decl to ensure that target architecture
/// supports unified addressing
virtual void processRequiresDirective(const OMPRequiresDecl *D);
/// Gets default memory ordering as specified in requires directive.
llvm::AtomicOrdering getDefaultMemoryOrdering() const;
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Checks if the \p VD variable is marked as nontemporal declaration in
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Create specialized alloca to handle lastprivate conditionals.
Address emitLastprivateConditionalInit(CodeGenFunction &CGF,
const VarDecl *VD);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Checks if the lastprivate conditional was updated in inner region and
/// writes the value.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;bool Fired = false;
/// #pragma omp ... shared(a)
/// {
/// lp_a = ...;
/// Fired = true;
/// }
/// if (Fired) {
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// Fired = false;
/// }
/// \endcode
virtual void checkAndEmitSharedLastprivateConditional(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls);
/// Gets the address of the global copy used for lastprivate conditional
/// update, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs).
/// \returns Pointer to the first element of the array casted to VoidPtr type.
std::pair<llvm::Value *, Address>
emitDependClause(CodeGenFunction &CGF,
ArrayRef<OMPTaskDataTy::DependData> Dependencies,
SourceLocation Loc);
/// Emits list of dependencies based on the provided data (array of
/// dependence/expression pairs) for depobj construct. In this case, the
/// variable is allocated dynamically. \returns Pointer to the first
/// element of the array casted to VoidPtr type.
Address emitDepobjDependClause(CodeGenFunction &CGF,
const OMPTaskDataTy::DependData &Dependencies,
SourceLocation Loc);
/// Emits the code to destroy the dependency object provided in depobj
/// directive.
void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
SourceLocation Loc);
/// Updates the dependency kind in the specified depobj object.
/// \param DepobjLVal LValue for the main depobj object.
/// \param NewDepKind New dependency kind.
void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
OpenMPDependClauseKind NewDepKind, SourceLocation Loc);
/// Initializes user defined allocators specified in the uses_allocators
/// clauses.
void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
const Expr *AllocatorTraits);
/// Destroys user defined allocators specified in the uses_allocators clause.
void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
/// Returns true if the variable is a local variable in untied task.
bool isLocalVarInUntiedTask(CodeGenFunction &CGF, const VarDecl *VD) const;
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
/// Construct a SIMD-only OpenMP runtime for the given module.
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits a masked region.
/// \param MaskedOpGen Generator for the statement associated with the given
/// masked region.
void emitMaskedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc,
const Expr *Filter = nullptr) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize it before start
/// of the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit a code for initialization of threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc, llvm::AtomicOrdering AO) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task
/// is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit a code for reduction clause. Next code should be emitted for
/// reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
/// Emit a code for initialization of task reduction clause. Next code
/// should be emitted for reduction:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_init(gtid, n, red_data);
/// \endcode
/// For reduction clause with task modifier it emits the next call:
/// \code
///
/// _taskred_item_t red_data[n];
/// ...
/// red_data[i].shar = &shareds[i];
/// red_data[i].orig = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n,
/// red_data);
/// \endcode
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Emits the following code for reduction clause with task modifier:
/// \code
/// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
/// \endcode
void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
bool IsWorksharingReduction) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows to reuse an existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts offloading the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, together with its device modifier, or null if no device
/// clause is used.
void emitTargetCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record which address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument basing on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
omp2-3.c | #include<math.h>
#include<stdio.h>
#define N 1000000
/* Approximate pi by the midpoint rule on f(x) = 4/(1+x^2) over [0,1].
 * Prints the result with 10 decimal digits and returns 0.
 *
 * Fix: the original guarded `area += x` with `#pragma omp critical`,
 * serializing every iteration of the parallel loop. A reduction gives each
 * thread a private partial sum that is combined once at the end — same
 * result, no per-iteration locking. */
int main() {
    int i;
    double x, area = 0;
#pragma omp parallel for private(x) reduction(+:area)
    for (i = 0; i < N; i++) {
        x = (i + .5) / N;      /* midpoint of the i-th subinterval */
        x = 4 / (1 + x*x);     /* integrand value; 1+x*x is double, so this is fp division */
        area += x;
    }
    printf("%.10lf\n", area/N); /* ~ 3.1415926536 */
    return 0;
}
|
GeometryConverterOCC.h | /* -*-c++-*- IfcQuery www.ifcquery.com
*
MIT License
Copyright (c) 2017 Fabian Gerold
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include <BRepAdaptor_Curve.hxx>
#include <BRep_Tool.hxx>
#include <GCPnts_AbscissaPoint.hxx>
#include <GCPnts_UniformAbscissa.hxx>
#include <Geom_Line.hxx>
#include <TopExp.hxx>
#include <TopExp_Explorer.hxx>
#include <TopoDS.hxx>
#include <TopoDS_Edge.hxx>
#include <TopoDS_Shape.hxx>
#include <TopoDS_Vertex.hxx>
#include <ifcpp/geometry/GeometrySettings.h>
#include <ifcpp/model/BuildingModel.h>
#include <ifcpp/model/BasicTypes.h>
#include <ifcpp/model/StatusCallback.h>
#include <ifcpp/IFC4/include/IfcCurtainWall.h>
#include <ifcpp/IFC4/include/IfcPropertySetDefinitionSet.h>
#include <ifcpp/IFC4/include/IfcRelAggregates.h>
#include <ifcpp/IFC4/include/IfcRelContainedInSpatialStructure.h>
#include <ifcpp/IFC4/include/IfcRelDefinesByProperties.h>
#include <ifcpp/IFC4/include/IfcSpace.h>
#include <ifcpp/IFC4/include/IfcWindow.h>
#include "RepresentationConverterOCC.h"
#include "GeometryInputDataOCC.h"
class GeometryConverterOCC : public StatusCallback
{
protected:
shared_ptr<BuildingModel> m_ifc_model;
shared_ptr<GeometrySettings> m_geom_settings;
shared_ptr<RepresentationConverterOCC> m_representation_converter;
std::map<int, shared_ptr<ProductShapeDataOCC> > m_product_shape_data;
std::map<int, shared_ptr<BuildingObject> > m_map_outside_spatial_structure;
double m_recent_progress;
std::map<int, std::vector<shared_ptr<StatusCallback::Message> > > m_messages;
#ifdef ENABLE_OPENMP
Mutex m_writelock_messages;
Mutex m_writelock_appearance_cache;
#endif
public:
	// getters and setters
	// loaded IFC model the geometry is created from
	shared_ptr<BuildingModel>& getBuildingModel() { return m_ifc_model; }
	// converter that turns IFC representation items into OpenCascade shapes
	shared_ptr<RepresentationConverterOCC>& getRepresentationConverter() { return m_representation_converter; }
	// conversion results, keyed by IFC entity id (filled by convertGeometry)
	std::map<int, shared_ptr<ProductShapeDataOCC> >& getShapeInputData() { return m_product_shape_data; }
	// tessellation / geometry-creation settings
	shared_ptr<GeometrySettings>& getGeomSettings() { return m_geom_settings; }
	// products that could not be attached to the spatial structure, keyed by entity id
	std::map<int, shared_ptr<BuildingObject> >& getObjectsOutsideSpatialStructure() { return m_map_outside_spatial_structure; }
	/**\brief Constructor: wires this converter to the given model.
	 * Creates default GeometrySettings and a RepresentationConverterOCC based on
	 * the model's unit converter, and redirects all status/progress messages of
	 * both the model and the representation converter to this object. */
	GeometryConverterOCC( shared_ptr<BuildingModel>& ifc_model )
	{
		m_ifc_model = ifc_model;
		m_geom_settings = shared_ptr<GeometrySettings>( new GeometrySettings() );
		resetNumVerticesPerCircle();
		shared_ptr<UnitConverter>& unit_converter = m_ifc_model->getUnitConverter();
		m_representation_converter = shared_ptr<RepresentationConverterOCC>( new RepresentationConverterOCC( m_geom_settings, unit_converter ) );
		// redirect all messages to this
		m_ifc_model->setMessageTarget( this );
		m_representation_converter->setMessageTarget( this );
	}
virtual ~GeometryConverterOCC() {}
	/**\brief Unloads the IFC model: clears this converter's caches and results,
	 * then clears the model itself, reporting progress along the way. */
	void resetModel()
	{
		progressTextCallback( L"Unloading model, cleaning up memory..." );
		clearInputCache();
		m_recent_progress = 0.0;
		m_ifc_model->clearCache();
		m_ifc_model->clearIfcModel();
		progressTextCallback( L"Unloading model done" );
		progressValueCallback( 0.0, "parse" );
#ifdef _DEBUG
		//GeomDebugUtils::clearDebugDumpFile();
#endif
	}
	/**\brief Drops all conversion results, cached representation data and
	 * collected messages. The model itself is left untouched. */
	void clearInputCache()
	{
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();
		m_messages.clear();
	}
	/**\brief Restores the default circle tessellation resolution in the settings. */
	void resetNumVerticesPerCircle()
	{
		m_geom_settings->resetNumVerticesPerCircle();
	}
	/**\brief Replaces the current model with \a model.
	 * Detaches the message callback from the old model, clears all caches, and
	 * re-wires the unit converter and message target to the new model. */
	void setModel( shared_ptr<BuildingModel> model )
	{
		if( m_ifc_model )
		{
			m_ifc_model->unsetMessageCallBack();
		}
		clearInputCache();
		m_ifc_model = model;
		m_representation_converter->clearCache();
		m_representation_converter->setUnitConverter( m_ifc_model->getUnitConverter() );
		m_ifc_model->setMessageTarget( this );
	}
	/*\brief method convertGeometry: Creates OpenCascade geometry from the previously loaded BuildingModel.
	Results are stored in m_product_shape_data (one ProductShapeDataOCC per IfcProduct, keyed by entity id).
	Products that could not be attached to the spatial structure end up in m_map_outside_spatial_structure.
	When ENABLE_OPENMP is defined, products are converted in parallel worker threads.
	**/
	void convertGeometry()
	{
		progressTextCallback( L"Creating geometry..." );
		progressValueCallback( 0, "geometry" );
		// start from a clean slate: previous conversion results are discarded
		m_product_shape_data.clear();
		m_map_outside_spatial_structure.clear();
		m_representation_converter->clearCache();
		shared_ptr<ProductShapeDataOCC> ifc_project_data;
		std::vector<shared_ptr<IfcObjectDefinition> > vec_ifc_object_defs;
		// NOTE(review): length_to_meter_factor is currently unused in this method
		const double length_to_meter_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
		const std::map<int, shared_ptr<BuildingEntity> >& map_entities = m_ifc_model->getMapIfcEntities();
		// collect all object definitions (products, spatial elements, the project, ...)
		for( auto it = map_entities.begin(); it != map_entities.end(); ++it )
		{
			shared_ptr<BuildingEntity> obj = it->second;
			shared_ptr<IfcObjectDefinition> object_def = dynamic_pointer_cast<IfcObjectDefinition>(obj);
			if( object_def )
			{
				vec_ifc_object_defs.push_back( object_def );
			}
		}
		// create geometry for each IfcProduct independently, spatial structure will be resolved later
		std::map<int, shared_ptr<ProductShapeDataOCC> >* map_products_ptr = &m_product_shape_data;
		const int num_products = (int)vec_ifc_object_defs.size();
#ifdef ENABLE_OPENMP
		Mutex writelock_map;
		Mutex writelock_ifc_project;
#pragma omp parallel firstprivate(num_products) shared(map_products_ptr)
		{
			// time for one product may vary significantly, so schedule not so many
#pragma omp for schedule(dynamic,10)
#endif
			for( int i = 0; i < num_products; ++i )
			{
				shared_ptr<IfcObjectDefinition> ifc_object_def = vec_ifc_object_defs[i];
				const int product_id = ifc_object_def->m_entity_id;
				shared_ptr<ProductShapeDataOCC> product_geom_input_data( new ProductShapeDataOCC( product_id ) );
				product_geom_input_data->m_ifc_object_definition = ifc_object_def;
				std::stringstream thread_err;
				if( dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_object_def) )
				{
					// geometry will be created in method subtractOpenings
					continue;
				}
				else if( dynamic_pointer_cast<IfcProject>(ifc_object_def) )
				{
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_ifc_project );
#endif
					// remember the project root; resolveProjectStructure starts from it below
					ifc_project_data = product_geom_input_data;
				}
				try
				{
					convertIfcProductShape( product_geom_input_data );
				}
				catch( OutOfMemoryException& e )
				{
					throw e;    // out of memory is fatal, propagate to the caller
				}
				catch( BuildingException& e )
				{
					thread_err << e.what();
				}
				catch( Standard_Failure& sf )
				{
					thread_err << sf.GetMessageString();
				}
				catch( std::exception& e )
				{
					thread_err << e.what();
				}
				catch( ... )
				{
					thread_err << "undefined error, product id " << product_id;
				}
				// publish the result (even on failure, so the product is accounted for)
				{
#ifdef ENABLE_OPENMP
					ScopedLock scoped_lock( writelock_map );
#endif
					map_products_ptr->insert( std::make_pair( product_id, product_geom_input_data ) );
					if( thread_err.tellp() > 0 )
					{
						messageCallback( thread_err.str().c_str(), StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
					}
				}
				// progress callback
				double progress = (double)i / (double)num_products;
				if( progress - m_recent_progress > 0.02 )
				{
#ifdef ENABLE_OPENMP
					if( omp_get_thread_num() == 0 )
#endif
					{
						// leave 10% of progress to openscenegraph internals
						progressValueCallback( progress*0.9, "geometry" );
						m_recent_progress = progress;
					}
				}
			}
#ifdef ENABLE_OPENMP
		} // implicit barrier
#endif
		try
		{
			// now resolve spatial structure
			if( ifc_project_data )
			{
				resolveProjectStructure( ifc_project_data );
			}
			// check if there are entities that are not in spatial structure
			for( auto it_product_shapes = m_product_shape_data.begin(); it_product_shapes != m_product_shape_data.end(); ++it_product_shapes )
			{
				shared_ptr<ProductShapeDataOCC> product_shape = it_product_shapes->second;
				if( !product_shape )
				{
					continue;
				}
				if( !product_shape->m_added_to_spatial_structure )
				{
					if( !product_shape->m_ifc_object_definition.expired() )
					{
						shared_ptr<IfcObjectDefinition> ifc_product( product_shape->m_ifc_object_definition );
						shared_ptr<IfcFeatureElementSubtraction> opening = dynamic_pointer_cast<IfcFeatureElementSubtraction>(ifc_product);
						if( opening )
						{
							// openings are consumed by subtractOpenings, not part of the spatial structure
							continue;
						}
						m_map_outside_spatial_structure[ifc_product->m_entity_id] = ifc_product;
					}
				}
			}
		}
		catch( OutOfMemoryException& e )
		{
			throw e;
		}
		catch( BuildingException& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( std::exception& e )
		{
			messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
		}
		catch( ... )
		{
			messageCallback( "undefined error", StatusCallback::MESSAGE_TYPE_ERROR, __FUNC__ );
		}
		m_representation_converter->getProfileCache()->clearProfileCache();
		progressTextCallback( L"Loading file done" );
		progressValueCallback( 1.0, "geometry" );
	}
	//\brief method convertIfcProductShape: Creates geometry (OCC shapes) for a single IfcProduct:
	// converts all of its IfcRepresentations, applies the object placement, subtracts openings,
	// and reads appearance information from attached property sets.
	// caution: when using OpenMP, this method runs in parallel threads, so every write access to member variables needs a write lock
	void convertIfcProductShape( shared_ptr<ProductShapeDataOCC>& product_shape )
	{
		// bail out if the product is gone or has no geometric representation
		if( product_shape->m_ifc_object_definition.expired() )
		{
			return;
		}
		shared_ptr<IfcObjectDefinition> ifc_object_def( product_shape->m_ifc_object_definition );
		shared_ptr<IfcProduct> ifc_product = dynamic_pointer_cast<IfcProduct>(ifc_object_def);
		if( !ifc_product )
		{
			return;
		}
		if( !ifc_product->m_Representation )
		{
			return;
		}
		// evaluate IFC geometry
		const double length_factor = m_ifc_model->getUnitConverter()->getLengthInMeterFactor();
		shared_ptr<IfcProductRepresentation>& product_representation = ifc_product->m_Representation;
		std::vector<shared_ptr<IfcRepresentation> >& vec_representations = product_representation->m_Representations;
		for( size_t i_representations = 0; i_representations < vec_representations.size(); ++i_representations )
		{
			const shared_ptr<IfcRepresentation>& representation = vec_representations[i_representations];
			try
			{
				shared_ptr<RepresentationDataOCC> representation_data( new RepresentationDataOCC() );
				m_representation_converter->convertIfcRepresentation( representation, representation_data );
				product_shape->m_vec_representations.push_back( representation_data );
			}
			catch( OutOfMemoryException& e )
			{
				throw e;    // fatal, propagate
			}
			catch( BuildingException& e )
			{
				// a failed representation is reported but does not abort the others
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
			catch( std::exception& e )
			{
				messageCallback( e.what(), StatusCallback::MESSAGE_TYPE_ERROR, "" );
			}
		}
		// IfcProduct has an ObjectPlacement that can be local or global
		gp_Trsf product_placement_matrix;
		if( ifc_product->m_ObjectPlacement )
		{
			// IfcPlacement2Matrix follows related placements in case of local coordinate systems
			std::unordered_set<IfcObjectPlacement*> placement_already_applied;
			PlacementConverterOCC::convertIfcObjectPlacement( ifc_product->m_ObjectPlacement, length_factor, product_placement_matrix, this, placement_already_applied );
			product_shape->applyTransformToProduct( product_placement_matrix );
		}
		// handle openings
		std::vector<shared_ptr<ProductShapeDataOCC> > vec_opening_data;
		const shared_ptr<IfcElement> ifc_element = dynamic_pointer_cast<IfcElement>(ifc_product);
		if( ifc_element )
		{
			m_representation_converter->subtractOpenings( ifc_element, product_shape );
		}
		// Fetch the IFCProduct relationships: scan property sets for appearance info
		if( ifc_product->m_IsDefinedBy_inverse.size() > 0 )
		{
			std::vector<weak_ptr<IfcRelDefinesByProperties> >& vec_IsDefinedBy_inverse = ifc_product->m_IsDefinedBy_inverse;
			for( size_t i = 0; i < vec_IsDefinedBy_inverse.size(); ++i )
			{
				shared_ptr<IfcRelDefinesByProperties> rel_def( vec_IsDefinedBy_inverse[i] );
				shared_ptr<IfcPropertySetDefinitionSelect> relating_property_definition_select = rel_def->m_RelatingPropertyDefinition;
				if( relating_property_definition_select )
				{
					// TYPE IfcPropertySetDefinitionSelect = SELECT (IfcPropertySetDefinition ,IfcPropertySetDefinitionSet);
					shared_ptr<IfcPropertySetDefinition> property_set_def = dynamic_pointer_cast<IfcPropertySetDefinition>(relating_property_definition_select);
					if( property_set_def )
					{
						shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def);
						if( property_set )
						{
							readAppearanceFromPropertySet( property_set, product_shape );
						}
						continue;
					}
					shared_ptr<IfcPropertySetDefinitionSet> property_set_def_set = dynamic_pointer_cast<IfcPropertySetDefinitionSet>(relating_property_definition_select);
					if( property_set_def_set )
					{
						// SELECT resolved to a set: scan every contained property set
						std::vector<shared_ptr<IfcPropertySetDefinition> >& vec_propterty_set_def = property_set_def_set->m_vec;
						std::vector<shared_ptr<IfcPropertySetDefinition> >::iterator it_property_set_def;
						for( it_property_set_def = vec_propterty_set_def.begin(); it_property_set_def != vec_propterty_set_def.end(); ++it_property_set_def )
						{
							shared_ptr<IfcPropertySetDefinition> property_set_def2 = (*it_property_set_def);
							if( property_set_def2 )
							{
								shared_ptr<IfcPropertySet> property_set = dynamic_pointer_cast<IfcPropertySet>(property_set_def2);
								if( property_set )
								{
									readAppearanceFromPropertySet( property_set, product_shape );
								}
							}
						}
						continue;
					}
				}
			}
		}
	}
void readAppearanceFromPropertySet( const shared_ptr<IfcPropertySet>& prop_set, shared_ptr<ProductShapeDataOCC>& product_shape )
{
if( !prop_set )
{
return;
}
for( auto& ifc_property : prop_set->m_HasProperties )
{
if( !ifc_property )
{
continue;
}
shared_ptr<IfcSimpleProperty> simple_property = dynamic_pointer_cast<IfcSimpleProperty>(ifc_property);
if( simple_property )
{
// ENTITY IfcSimpleProperty ABSTRACT SUPERTYPE OF(ONEOF( IfcPropertyBoundedValue, IfcPropertyEnumeratedValue, IfcPropertyListValue,
// IfcPropertyReferenceValue, IfcPropertySingleValue, IfcPropertyTableValue))
shared_ptr<IfcIdentifier> property_name = simple_property->m_Name;
std::wstring name_str = property_name->m_value;
if( name_str.compare( L"LayerName" ) == 0 )
{
// TODO: implement layers
}
shared_ptr<IfcText> description = simple_property->m_Description;
shared_ptr<IfcPropertySingleValue> property_single_value = dynamic_pointer_cast<IfcPropertySingleValue>(simple_property);
if( property_single_value )
{
//shared_ptr<IfcValue>& nominal_value = property_single_value->m_NominalValue; //optional
//shared_ptr<IfcUnit>& unit = property_single_value->m_Unit; //optional
}
continue;
}
shared_ptr<IfcComplexProperty> complex_property = dynamic_pointer_cast<IfcComplexProperty>(ifc_property);
if( complex_property )
{
if( !complex_property->m_UsageName ) continue;
if( complex_property->m_UsageName->m_value.compare( L"Color" ) == 0 )
{
vec4 vec_color;
m_representation_converter->getStylesConverter()->convertIfcComplexPropertyColor( complex_property, vec_color );
shared_ptr<AppearanceData> appearance_data( new AppearanceData( -1 ) );
if( !appearance_data )
{
throw OutOfMemoryException( __FUNC__ );
}
appearance_data->m_apply_to_geometry_type = AppearanceData::GEOM_TYPE_ANY;
appearance_data->m_color_ambient = vec_color;
appearance_data->m_color_diffuse = vec_color;
appearance_data->m_color_specular = vec_color;
appearance_data->m_shininess = 35.f;
product_shape->addAppearance( appearance_data );
}
}
}
}
void resolveProjectStructure( shared_ptr<ProductShapeDataOCC>& product_data )
{
if( !product_data )
{
return;
}
if( product_data->m_ifc_object_definition.expired() )
{
return;
}
product_data->m_added_to_spatial_structure = true;
shared_ptr<IfcObjectDefinition> ifc_object_def( product_data->m_ifc_object_definition );
const int entity_id = ifc_object_def->m_entity_id;
const std::vector<weak_ptr<IfcRelAggregates> >& vec_IsDecomposedBy = ifc_object_def->m_IsDecomposedBy_inverse;
for( size_t ii = 0; ii < vec_IsDecomposedBy.size(); ++ii )
{
const weak_ptr<IfcRelAggregates>& rel_aggregates_weak_ptr = vec_IsDecomposedBy[ii];
if( rel_aggregates_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelAggregates> rel_aggregates( rel_aggregates_weak_ptr );
if( rel_aggregates )
{
const std::vector<shared_ptr<IfcObjectDefinition> >& vec_related_objects = rel_aggregates->m_RelatedObjects;
for( size_t jj = 0; jj < vec_related_objects.size(); ++jj )
{
const shared_ptr<IfcObjectDefinition>& related_obj_def = vec_related_objects[jj];
if( related_obj_def )
{
auto it_product_map = m_product_shape_data.find( related_obj_def->m_entity_id );
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeDataOCC>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
shared_ptr<IfcSpatialStructureElement> spatial_ele = dynamic_pointer_cast<IfcSpatialStructureElement>(ifc_object_def);
if( spatial_ele )
{
const std::vector<weak_ptr<IfcRelContainedInSpatialStructure> >& vec_contains = spatial_ele->m_ContainsElements_inverse;
for( size_t ii = 0; ii < vec_contains.size(); ++ii )
{
const weak_ptr<IfcRelContainedInSpatialStructure>& rel_contained_weak_ptr = vec_contains[ii];
if( rel_contained_weak_ptr.expired() )
{
continue;
}
shared_ptr<IfcRelContainedInSpatialStructure> rel_contained( rel_contained_weak_ptr );
if( rel_contained )
{
const std::vector<shared_ptr<IfcProduct> >& vec_related_elements = rel_contained->m_RelatedElements;
for( size_t jj = 0; jj < vec_related_elements.size(); ++jj )
{
const shared_ptr<IfcProduct>& related_product = vec_related_elements[jj];
if( related_product )
{
auto it_product_map = m_product_shape_data.find( related_product->m_entity_id );
if( it_product_map != m_product_shape_data.end() )
{
shared_ptr<ProductShapeDataOCC>& related_product_shape = it_product_map->second;
if( related_product_shape )
{
product_data->addChildProduct( related_product_shape, product_data );
resolveProjectStructure( related_product_shape );
}
}
}
}
}
}
}
// TODO: handle IfcRelAssignsToProduct
}
	/**\brief Message sink registered with the model and the representation converter.
	 * Deduplicates messages per entity (same text for the same entity id is reported
	 * only once), then forwards to messageCallback. \a ptr is the GeometryConverterOCC
	 * instance that registered itself as target.
	 * Thread-safety: the per-entity message map is guarded by m_writelock_messages
	 * when ENABLE_OPENMP is defined. */
	virtual void messageTarget( void* ptr, shared_ptr<StatusCallback::Message> m )
	{
		GeometryConverterOCC* myself = (GeometryConverterOCC*)ptr;
		if( myself )
		{
			if( m->m_entity )
			{
#ifdef ENABLE_OPENMP
				ScopedLock lock( myself->m_writelock_messages );
#endif
				// make sure that the same message for one entity does not appear several times
				const int entity_id = m->m_entity->m_entity_id;
				auto it = myself->m_messages.find( entity_id );
				if( it != myself->m_messages.end() )
				{
					std::vector<shared_ptr<StatusCallback::Message> >& vec_message_for_entity = it->second;
					for( size_t i = 0; i < vec_message_for_entity.size(); ++i )
					{
						shared_ptr<StatusCallback::Message>& existing_message = vec_message_for_entity[i];
						if( existing_message->m_message_text.compare( m->m_message_text ) == 0 )
						{
							// same message for same entity is already there, so ignore message
							return;
						}
					}
					vec_message_for_entity.push_back( m );
				}
				else
				{
					// first message for this entity: create its message list
					std::vector<shared_ptr<StatusCallback::Message> >& vec = myself->m_messages.insert( std::make_pair( entity_id, std::vector<shared_ptr<StatusCallback::Message> >() ) ).first->second;
					vec.push_back( m );
				}
			}
			myself->messageCallback( m );
		}
	}
};
|
vmul.c | /*
This file is part of HiParTI!.
HiParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
HiParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with HiParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <HiParTI.h>
#include <stdio.h>
/**
* SpMV, y = Ax
*/
/**
 * SpMV, y += A*x, for a matrix stored in CSR format.
 * Accumulates into y (the caller is expected to provide the initial values).
 * Rows are processed in parallel when HiParTI is built with OpenMP.
 * Always returns 0.
 */
int ptiSparseMatrixMulVectorCSR(ptiValueVector * y, ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
#ifdef HIPARTI_USE_OPENMP
#pragma omp parallel for
#endif
    for(ptiIndex row = 0; row < csrmtx->nrows; ++row) {
        const ptiNnzIndex row_begin = csrmtx->rowptr.data[row];
        const ptiNnzIndex row_end = csrmtx->rowptr.data[row + 1];
        for(ptiNnzIndex nz = row_begin; nz < row_end; ++nz) {
            y->data[row] += csrmtx->values.data[nz] * x->data[csrmtx->colind.data[nz]];
        }
    }
    return 0;
}
#ifdef HIPARTI_USE_OPENMP
/**
 * OpenMP SpMV, y += A*x, for a CSR matrix: one parallel iteration per row,
 * so no two threads ever write the same element of y.
 * Always returns 0.
 */
int ptiOmpSparseMatrixMulVectorCSR(ptiValueVector * y, ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
#pragma omp parallel for
    for(ptiIndex row = 0; row < csrmtx->nrows; ++row) {
        const ptiNnzIndex row_begin = csrmtx->rowptr.data[row];
        const ptiNnzIndex row_end = csrmtx->rowptr.data[row + 1];
        for(ptiNnzIndex nz = row_begin; nz < row_end; ++nz) {
            y->data[row] += csrmtx->values.data[nz] * x->data[csrmtx->colind.data[nz]];
        }
    }
    return 0;
}
/**
 * SpMV, y += A*x (CSR), using one private accumulator vector per thread
 * followed by a reduction (see ptiOmpSparseMatrixMulVectorCSR_Reduce).
 * Returns 0 on success, -1 if the buffer array cannot be allocated.
 *
 * Fixes: the malloc result was never checked, and nthreads was written
 * concurrently by every thread of the parallel region (a data race) — the
 * value is now obtained by a single thread.
 */
int ptiOmpSparseMatrixMulVectorCSRReduce(ptiValueVector * y, const ptiSparseMatrixCSR *mtx, ptiValueVector * x){
    ptiValueVector * ybufs;
    int nthreads = 1;
#pragma omp parallel
#pragma omp single
    nthreads = omp_get_num_threads();   /* single writer: no race, same value as before */
    ybufs = malloc(nthreads * sizeof *ybufs);
    if(ybufs == NULL) {
        return -1;   /* allocation failure; y is left untouched */
    }
    for(int t=0; t<nthreads; ++t) {
        ptiNewValueVector(&ybufs[t], mtx->nrows, mtx->nrows);
        ptiConstantValueVector(&ybufs[t], 0);   /* buffers must start at zero */
    }
    ptiOmpSparseMatrixMulVectorCSR_Reduce(y, ybufs, mtx, x);
    for(int t=0; t<nthreads; ++t) {
        ptiFreeValueVector(&ybufs[t]);
    }
    free(ybufs);
    return 0;
}
/**
 * SpMV, y += A*x (CSR), in two phases:
 *   1. each thread accumulates its rows into its private buffer ybufs[tid];
 *   2. the per-thread buffers are reduced element-wise into y.
 * ybufs must hold at least one zeroed vector of length y->len per thread
 * (allocated by ptiOmpSparseMatrixMulVectorCSRReduce). Always returns 0.
 *
 * Fixes: nthreads was written concurrently by every thread (data race) — now
 * written by a single thread; omp_get_thread_num() was called in the inner
 * (nonzero) loop although it is invariant per row — hoisted out.
 */
int ptiOmpSparseMatrixMulVectorCSR_Reduce(ptiValueVector *y, ptiValueVector * ybufs, const ptiSparseMatrixCSR *csrmtx, ptiValueVector * x)
{
    int nthreads = 1;
#pragma omp parallel
#pragma omp single
    nthreads = omp_get_num_threads();   /* single writer: no race */
    /* Phase 1: private accumulation, one row per iteration */
#pragma omp parallel for // schedule(static)
    for(ptiIndex i = 0; i < csrmtx->nrows; ++i) {
        const int tid = omp_get_thread_num();   /* invariant over the row: hoisted */
        for(ptiNnzIndex z = csrmtx->rowptr.data[i]; z < csrmtx->rowptr.data[i+1]; ++z) {
            ptiIndex col = csrmtx->colind.data[z];
            ybufs[tid].data[i] += csrmtx->values.data[z] * x->data[col];
        }
    }
    /* Phase 2: reduce the per-thread buffers into y */
#pragma omp parallel for schedule(static)
    for(ptiIndex r=0; r<y->len; ++r){
        for (int t=0; t<nthreads; ++t){
            y->data[r] += ybufs[t].data[r];
        }
    }
    return 0;
}
#endif
|
mypaint-tiled-surface.c | /* libmypaint - The MyPaint Brush Library
* Copyright (C) 2007-2014 Martin Renold <martinxyz@gmx.ch> et. al.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <config.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "mypaint-config.h"
#include "mypaint-tiled-surface.h"
#include "tiled-surface-private.h"
#include "helpers.h"
#include "brushmodes.h"
#include "operationqueue.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void process_tile(MyPaintTiledSurface *self, int tx, int ty);
/* Default #MyPaintSurface::begin_atomic vfunc: forwards to the
 * tiled-surface implementation. */
static void
begin_atomic_default(MyPaintSurface *surface)
{
    mypaint_tiled_surface_begin_atomic((MyPaintTiledSurface *)surface);
}
/* Default #MyPaintSurface::end_atomic vfunc: forwards to the
 * tiled-surface implementation. */
static void
end_atomic_default(MyPaintSurface *surface, MyPaintRectangle *roi)
{
    mypaint_tiled_surface_end_atomic((MyPaintTiledSurface *)surface, roi);
}
/**
 * mypaint_tiled_surface_begin_atomic: (skip)
 *
 * Implementation of #MyPaintSurface::begin_atomic vfunc.
 * Resets the dirty bounding box so end_atomic can report the area
 * touched by the operations in between.
 * Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
 * if implementing their own #MyPaintSurface::begin_atomic vfunc.
 * Application code should only use mypaint_surface_begin_atomic().
 */
void
mypaint_tiled_surface_begin_atomic(MyPaintTiledSurface *self)
{
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;
}
/**
 * mypaint_tiled_surface_end_atomic: (skip)
 *
 * Implementation of #MyPaintSurface::end_atomic vfunc
 * Note: Only intended to be used from #MyPaintTiledSurface subclasses, which should chain up to this
 * if implementing their own #MyPaintSurface::end_atomic vfunc.
 * Application code should only use mypaint_surface_end_atomic().
 */
void
mypaint_tiled_surface_end_atomic(MyPaintTiledSurface *self, MyPaintRectangle *roi)
{
    // Process tiles
    TileIndex *tiles;
    int tiles_n = operation_queue_get_dirty_tiles(self->operation_queue, &tiles);
    /* Parallelize only when tile requests are threadsafe and there is enough
     * work to amortize the fork/join overhead. */
#pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
    for (int i = 0; i < tiles_n; i++) {
        process_tile(self, tiles[i].x, tiles[i].y);
    }
    operation_queue_clear_dirty_tiles(self->operation_queue);
    if (roi) {
        /* report the bounding box of everything dirtied since begin_atomic */
        *roi = self->dirty_bbox;
    }
}
/**
 * mypaint_tiled_surface_tile_request_start:
 *
 * Fetch a tile out from the underlying tile store.
 * When successful, request->data will be set to point to the fetched tile.
 * Consumers must *always* call mypaint_tiled_surface_tile_request_end() with the same
 * request to complete the transaction.
 */
void mypaint_tiled_surface_tile_request_start(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    /* the vfunc is mandatory: subclasses must provide it */
    assert(self->tile_request_start);
    self->tile_request_start(self, request);
}
/**
 * mypaint_tiled_surface_tile_request_end:
 *
 * Put a (potentially modified) tile back into the underlying tile store.
 *
 * Consumers must *always* call mypaint_tiled_surface_tile_request_start() with the same
 * request to start the transaction before calling this function.
 */
void mypaint_tiled_surface_tile_request_end(MyPaintTiledSurface *self, MyPaintTileRequest *request)
{
    /* the vfunc is mandatory: subclasses must provide it */
    assert(self->tile_request_end);
    self->tile_request_end(self, request);
}
/* FIXME: either expose this through MyPaintSurface, or move it into the brush engine */
/**
* mypaint_tiled_surface_set_symmetry_state:
* @active: TRUE to enable, FALSE to disable.
* @center_x: x coordinate of the vertical axis to mirror dabs across.
*
* Enable/Disable symmetric brush painting across an X axis.
*/
void
mypaint_tiled_surface_set_symmetry_state(MyPaintTiledSurface *self, gboolean active, float center_x)
{
    /* Record the symmetry settings; draw_dab() consults them to decide
     * whether to paint a mirrored second dab. */
    self->surface_center_x = center_x;
    self->surface_do_symmetry = active;
}
/**
 * mypaint_tile_request_init:
 *
 * Initialize a request for use with mypaint_tiled_surface_tile_request_start()
 * and mypaint_tiled_surface_tile_request_end()
 */
void
mypaint_tile_request_init(MyPaintTileRequest *data, int level,
                          int tx, int ty, gboolean readonly)
{
    data->mipmap_level = level;
    data->tx = tx;
    data->ty = ty;
    data->readonly = readonly;
    /* These are filled in by the backend during tile_request_start(). */
    data->buffer = NULL;
    data->context = NULL;
    /* Tag the request with the worker thread issuing it so threadsafe
     * backends can keep per-thread state; -1 when OpenMP is unavailable. */
#ifdef _OPENMP
    data->thread_id = omp_get_thread_num();
#else
    data->thread_id = -1;
#endif
}
// Must be threadsafe
//
// Squared distance of the point (x, y) from the dab center, measured in
// the dab's own frame: rotate by the dab angle (sn/cs are its sine and
// cosine) and stretch the minor axis by aspect_ratio.
static inline float
calculate_r_sample(float x, float y, float aspect_ratio,
                   float sn, float cs)
{
    const float rot_y = (y*cs - x*sn) * aspect_ratio;
    const float rot_x = y*sn + x*cs;
    return rot_y*rot_y + rot_x*rot_x;
}
// Normalized squared distance of pixel (xp, yp) from the dab center:
// 0 at the center, 1 at the dab fringe.
// NOTE: logic duplicated in brush::count_dabs_to() -- keep in sync.
static inline float
calculate_rr(int xp, int yp, float x, float y, float aspect_ratio,
             float sn, float cs, float one_over_radius2)
{
    /* Pixel centers sit at integer coordinates + 0.5. */
    const float dy = (yp + 0.5f - y);
    const float dx = (xp + 0.5f - x);
    /* Rotate into the dab frame and apply the aspect-ratio squash. */
    const float ry = (dy*cs - dx*sn) * aspect_ratio;
    const float rx = dy*sn + dx*cs;
    /* Result is in range 0.0..1.0*sqrt(2). */
    return (ry*ry + rx*rx) * one_over_radius2;
}
static inline float
sign_point_in_line( float px, float py, float vx, float vy )
{
return (px - vx) * (-vy) - (vx) * (py - vy);
}
// Orthogonal projection of point (px, py) onto the line through the
// origin with direction (lx, ly); result returned via (ox, oy).
// Callers pass a unit direction vector, so l2 is nonzero.
static inline void
closest_point_to_line( float lx, float ly, float px, float py, float *ox, float *oy )
{
    const float len_sq = lx*lx + ly*ly;
    const float dot = px*lx + py*ly;
    const float scale = dot / len_sq;
    *ox = lx * scale;
    *oy = ly * scale;
}
// Must be threadsafe
//
// Anti-aliased variant of calculate_rr, used for small dabs.
//
// This works by taking the visibility at the nearest point
// and dividing by 1.0 + delta, where:
//
//   - nearest point: in-pixel point where the dab has most influence
//   - farthest point: point at a fixed distance away from the
//     nearest point
//   - delta: how much more occluded the farthest point is relative
//     to the nearest point
static inline float
calculate_rr_antialiased(int xp, int yp, float x, float y, float aspect_ratio,
                         float sn, float cs, float one_over_radius2,
                         float r_aa_start)
{
    /* Express the pixel's borders in a coordinate system whose origin
     * is the dab's center. */
    const float px_right = x - (float)xp;
    const float px_bottom = y - (float)yp;
    const float px_center_x = px_right - 0.5f;
    const float px_center_y = px_bottom - 0.5f;
    const float px_left = px_right - 1.0f;
    const float px_top = px_bottom - 1.0f;

    float near_x, near_y;   /* nearest to origin, but still inside pixel */
    float far_x, far_y;     /* farthest from origin, but still inside pixel */
    float r_near, rr_near;
    float r_far, rr_far;

    if( px_left < 0 && px_right > 0 &&
        px_top < 0 && px_bottom > 0 )
    {
        /* The dab's center lies inside this pixel: distance is zero. */
        near_x = 0;
        near_y = 0;
        r_near = rr_near = 0;
    }
    else
    {
        closest_point_to_line( cs, sn, px_center_x, px_center_y, &near_x, &near_y );
        near_x = CLAMP( near_x, px_left, px_right );
        near_y = CLAMP( near_y, px_top, px_bottom );
        /* XXX: precision of the "nearest" point could be improved by
         * intersecting the line towards the origin with the pixel's
         * borders here, however the improvement would probably not
         * justify the performance cost. */
        r_near = calculate_r_sample( near_x, near_y, aspect_ratio, sn, cs );
        rr_near = r_near * one_over_radius2;
    }

    /* Entirely out of the dab's reach? */
    if( rr_near > 1.0f )
        return rr_near;

    /* Which side of the dab's major axis is the pixel center on? */
    const float center_sign = sign_point_in_line( px_center_x, px_center_y, cs, -sn );

    /* Radius of a circle with area 1:  A = pi*r^2  =>  r = sqrt(1/pi). */
    const float rad_area_1 = sqrtf( 1.0f / M_PI );

    if( center_sign < 0 )
    {
        /* pixel center below the dab's axis */
        far_x = near_x - sn*rad_area_1;
        far_y = near_y + cs*rad_area_1;
    }
    else
    {
        /* pixel center above the dab's axis */
        far_x = near_x + sn*rad_area_1;
        far_y = near_y - cs*rad_area_1;
    }
    r_far = calculate_r_sample( far_x, far_y, aspect_ratio, sn, cs );
    rr_far = r_far * one_over_radius2;

    /* Near the dab center the gradient is gentle enough that the
     * midpoint of the two samples is a good, cheap approximation. */
    if( r_far < r_aa_start )
        return (rr_far + rr_near) * 0.5f;

    /* Heavier AA approximation: attenuate the visibility at the nearest
     * point by how much more occluded the farthest point is. */
    float visibility_near = 1.0f - rr_near;
    const float delta = rr_far - rr_near;
    const float occlusion = 1.0f + delta;
    visibility_near /= occlusion;
    return 1.0f - visibility_near;
}
// Opacity of the dab at normalized squared distance rr, as a two-segment
// piecewise-linear falloff: segment 1 applies inside the hard core
// (rr <= hardness), segment 2 in the soft fringe; beyond the fringe
// (rr > 1) the dab contributes nothing.
static inline float
calculate_opa(float rr, float hardness,
              float segment1_offset, float segment1_slope,
              float segment2_offset, float segment2_slope) {

    float opa;
    if (rr <= hardness) {
        opa = segment1_offset + rr*segment1_slope;
    } else {
        opa = segment2_offset + rr*segment2_slope;
    }
    if (rr > 1.0f) {
        opa = 0.0f;
    }
#ifdef HEAVY_DEBUG
    assert(isfinite(opa));
    assert(opa >= 0.0f && opa <= 1.0f);
#endif
    return opa;
}
// Must be threadsafe
//
// Rasterize one dab's opacity mask into `mask`, clipped to a single
// MYPAINT_TILE_SIZE x MYPAINT_TILE_SIZE tile. (x, y) is the dab center in
// tile-local pixel coordinates. The output is run-length encoded (see the
// comment at the encoding loop below) and terminated by a 0,0 pair.
void render_dab_mask (uint16_t * mask,
float x, float y,
float radius,
float hardness,
float aspect_ratio, float angle
)
{
hardness = CLAMP(hardness, 0.0, 1.0);
if (aspect_ratio<1.0) aspect_ratio=1.0;
assert(hardness != 0.0); // assured by caller
// For a graphical explanation, see:
// http://wiki.mypaint.info/Development/Documentation/Brushlib
//
// The hardness calculation is explained below:
//
// Dab opacity gradually fades out from the center (rr=0) to
// fringe (rr=1) of the dab. How exactly depends on the hardness.
// We use two linear segments, for which we pre-calculate slope
// and offset here.
//
// opa
// ^
// * .
// | *
// | .
// +-----------*> rr = (distance_from_center/radius)^2
// 0 1
//
float segment1_offset = 1.0f;
float segment1_slope = -(1.0f/hardness - 1.0f);
float segment2_offset = hardness/(1.0f-hardness);
float segment2_slope = -hardness/(1.0f-hardness);
// for hardness == 1.0, segment2 will never be used
float angle_rad=angle/360*2*M_PI;
float cs=cos(angle_rad);
float sn=sin(angle_rad);
// Clip the dab's bounding box to the tile.
const float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure
int x0 = floor (x - r_fringe);
int y0 = floor (y - r_fringe);
int x1 = floor (x + r_fringe);
int y1 = floor (y + r_fringe);
if (x0 < 0) x0 = 0;
if (y0 < 0) y0 = 0;
if (x1 > MYPAINT_TILE_SIZE-1) x1 = MYPAINT_TILE_SIZE-1;
if (y1 > MYPAINT_TILE_SIZE-1) y1 = MYPAINT_TILE_SIZE-1;
const float one_over_radius2 = 1.0f/(radius*radius);
// Pre-calculate rr and put it in the mask.
// This an optimization that makes use of auto-vectorization
// OPTIMIZE: if using floats for the brush engine, store these directly in the mask
float rr_mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
// Small dabs alias badly, so they take the slower anti-aliased distance
// computation; larger dabs use the plain one.
if (radius < 3.0f)
{
const float aa_border = 1.0f;
float r_aa_start = ((radius>aa_border) ? (radius-aa_border) : 0);
r_aa_start *= r_aa_start / aspect_ratio;
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr_antialiased(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2,
r_aa_start);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
else
{
for (int yp = y0; yp <= y1; yp++) {
for (int xp = x0; xp <= x1; xp++) {
const float rr = calculate_rr(xp, yp,
x, y, aspect_ratio,
sn, cs, one_over_radius2);
rr_mask[(yp*MYPAINT_TILE_SIZE)+xp] = rr;
}
}
}
// we do run length encoding: if opacity is zero, the next
// value in the mask is the number of pixels that can be skipped.
uint16_t * mask_p = mask;
int skip=0;
// Pixels above the clipped box are all transparent; fold them into the
// initial skip run.
skip += y0*MYPAINT_TILE_SIZE;
for (int yp = y0; yp <= y1; yp++) {
skip += x0;
int xp;
for (xp = x0; xp <= x1; xp++) {
const float rr = rr_mask[(yp*MYPAINT_TILE_SIZE)+xp];
const float opa = calculate_opa(rr, hardness,
segment1_offset, segment1_slope,
segment2_offset, segment2_slope);
// Opacity is emitted as 15-bit fixed point.
const uint16_t opa_ = opa * (1<<15);
if (!opa_) {
skip++;
} else {
if (skip) {
*mask_p++ = 0;
// NOTE(review): the skip count is scaled by 4 here; presumably the
// draw_dab_pixels_* consumers step in 4-channel pixel units --
// confirm against those readers before changing.
*mask_p++ = skip*4;
skip = 0;
}
*mask_p++ = opa_;
}
}
skip += MYPAINT_TILE_SIZE-xp;
}
// Terminator: a zero opacity followed by a zero skip.
*mask_p++ = 0;
*mask_p++ = 0;
}
// Must be threadsafe
//
// Apply one queued dab operation to a single tile's pixel data
// (rgba_p), using `mask` as scratch space for the rasterized dab.
void
process_op(uint16_t *rgba_p, uint16_t *mask,
           int tx, int ty, OperationDataDrawDab *op)
{
    /* Step 1: rasterize the dab's opacity mask in tile-local coordinates. */
    render_dab_mask(mask,
                    op->x - tx*MYPAINT_TILE_SIZE,
                    op->y - ty*MYPAINT_TILE_SIZE,
                    op->radius,
                    op->hardness,
                    op->aspect_ratio, op->angle);

    /* Step 2: stamp the mask once per activated blend mode. */
    if (op->normal) {
        if (op->color_a == 1.0) {
            draw_dab_pixels_BlendMode_Normal(mask, rgba_p,
                op->color_r, op->color_g, op->color_b,
                op->normal*op->opaque*(1<<15));
        } else {
            /* Normal case for brushes that use smudging (eg. watercolor). */
            draw_dab_pixels_BlendMode_Normal_and_Eraser(mask, rgba_p,
                op->color_r, op->color_g, op->color_b,
                op->color_a*(1<<15), op->normal*op->opaque*(1<<15));
        }
    }

    if (op->lock_alpha) {
        draw_dab_pixels_BlendMode_LockAlpha(mask, rgba_p,
            op->color_r, op->color_g, op->color_b,
            op->lock_alpha*op->opaque*(1<<15));
    }

    if (op->colorize) {
        draw_dab_pixels_BlendMode_Color(mask, rgba_p,
            op->color_r, op->color_g, op->color_b,
            op->colorize*op->opaque*(1<<15));
    }
}
// Must be threadsafe
//
// Drain and apply all queued dab operations for the tile at (tx, ty).
// Fetches the tile's pixels via a tile request, applies each popped
// operation with process_op(), and returns the tile to the store.
void
process_tile(MyPaintTiledSurface *self, int tx, int ty)
{
    TileIndex tile_index = {tx, ty};

    OperationDataDrawDab *op = operation_queue_pop(self->operation_queue, tile_index);
    if (!op) {
        return;  // nothing queued for this tile
    }

    MyPaintTileRequest request_data;
    const int mipmap_level = 0;
    mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, FALSE);

    mypaint_tiled_surface_tile_request_start(self, &request_data);
    uint16_t * rgba_p = request_data.buffer;
    if (!rgba_p) {
        printf("Warning: Unable to get tile!\n");
        // BUGFIX: the old early return leaked the popped operation(s) and
        // never closed the tile request, although the API contract requires
        // every tile_request_start to be paired with a tile_request_end.
        while (op) {
            free(op);
            op = operation_queue_pop(self->operation_queue, tile_index);
        }
        mypaint_tiled_surface_tile_request_end(self, &request_data);
        return;
    }

    // Scratch buffer for the RLE dab mask; sized like render_dab_mask expects.
    uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];

    while (op) {
        process_op(rgba_p, mask, tile_index.x, tile_index.y, op);
        free(op);
        op = operation_queue_pop(self->operation_queue, tile_index);
    }
    mypaint_tiled_surface_tile_request_end(self, &request_data);
}
// OPTIMIZE: send a list of the exact changed rects instead of a bounding box
// to minimize the area being composited? Profile to see the effect first.
//
// Grow the surface's dirty bounding box to cover the pixels a dab
// operation can touch.
void
update_dirty_bbox(MyPaintTiledSurface *self, OperationDataDrawDab *op)
{
    /* Conservative pixel bounds of the dab; +1.0 is just a safety margin. */
    const float r_fringe = op->radius + 1.0f;
    const int x0 = floor (op->x - r_fringe);
    const int y0 = floor (op->y - r_fringe);
    const int x1 = floor (op->x + r_fringe);
    const int y1 = floor (op->y + r_fringe);

    /* Expanding to both corners covers the whole rectangle. */
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, x0, y0);
    mypaint_rectangle_expand_to_include_point(&self->dirty_bbox, x1, y1);
}
// returns TRUE if the surface was modified
//
// Validate and normalize the dab parameters, then queue one copy of the
// operation per tile the dab touches (actual pixel work is deferred to
// process_tile), and grow the dirty bounding box accordingly.
gboolean draw_dab_internal (MyPaintTiledSurface *self, float x, float y,
               float radius,
               float color_r, float color_g, float color_b,
               float opaque, float hardness,
               float color_a,
               float aspect_ratio, float angle,
               float lock_alpha,
               float colorize
               )
{
    /* Describe the dab once on the stack; heap copies are queued below. */
    OperationDataDrawDab op_struct;
    OperationDataDrawDab *op = &op_struct;

    op->x = x;
    op->y = y;
    op->radius = radius;
    op->aspect_ratio = aspect_ratio;
    op->angle = angle;
    op->opaque = CLAMP(opaque, 0.0f, 1.0f);
    op->hardness = CLAMP(hardness, 0.0f, 1.0f);
    op->lock_alpha = CLAMP(lock_alpha, 0.0f, 1.0f);
    op->colorize = CLAMP(colorize, 0.0f, 1.0f);

    if (op->radius < 0.1f) return FALSE; // don't bother with dabs smaller than 0.1 pixel
    if (op->hardness == 0.0f) return FALSE; // infinitely small center point, fully transparent outside
    if (op->opaque == 0.0f) return FALSE;

    color_r = CLAMP(color_r, 0.0f, 1.0f);
    color_g = CLAMP(color_g, 0.0f, 1.0f);
    color_b = CLAMP(color_b, 0.0f, 1.0f);
    color_a = CLAMP(color_a, 0.0f, 1.0f);

    /* Color channels are stored as 15-bit fixed point; alpha stays float. */
    op->color_r = color_r * (1<<15);
    op->color_g = color_g * (1<<15);
    op->color_b = color_b * (1<<15);
    op->color_a = color_a;

    // blending mode preparation
    op->normal = 1.0f;
    op->normal *= 1.0f-op->lock_alpha;
    op->normal *= 1.0f-op->colorize;

    if (op->aspect_ratio<1.0f) op->aspect_ratio=1.0f;

    // Determine the tiles influenced by operation, and queue it for processing for each tile
    float r_fringe = radius + 1.0f; // +1.0 should not be required, only to be sure

    int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
    int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
    int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
    int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);

    for (int ty = ty1; ty <= ty2; ty++) {
        for (int tx = tx1; tx <= tx2; tx++) {
            const TileIndex tile_index = {tx, ty};
            OperationDataDrawDab *op_copy = malloc(sizeof *op_copy);
            // BUGFIX: the allocation result was used unchecked, which
            // dereferenced NULL under memory pressure. Skip the tile
            // instead of crashing.
            if (!op_copy) {
                continue;
            }
            *op_copy = *op;
            operation_queue_add(self->operation_queue, tile_index, op_copy);
        }
    }

    update_dirty_bbox(self, op);

    return TRUE;
}
// returns TRUE if the surface was modified
//
// #MyPaintSurface::draw_dab vfunc: paints the dab, plus a mirrored copy
// when symmetry painting is enabled.
int draw_dab (MyPaintSurface *surface, float x, float y,
               float radius,
               float color_r, float color_g, float color_b,
               float opaque, float hardness,
               float color_a,
               float aspect_ratio, float angle,
               float lock_alpha,
               float colorize)
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;

    /* Normal pass. */
    gboolean modified = draw_dab_internal(self, x, y, radius,
                                          color_r, color_g, color_b,
                                          opaque, hardness, color_a,
                                          aspect_ratio, angle,
                                          lock_alpha, colorize);

    /* Symmetry pass: reflect the dab across the configured vertical axis;
     * the angle is negated so the mirrored dab is a true reflection. */
    if (self->surface_do_symmetry) {
        const float symm_x = self->surface_center_x + (self->surface_center_x - x);
        if (draw_dab_internal(self, symm_x, y, radius,
                              color_r, color_g, color_b,
                              opaque, hardness, color_a,
                              aspect_ratio, -angle,
                              lock_alpha, colorize)) {
            modified = TRUE;
        }
    }

    return modified;
}
// #MyPaintSurface::get_color vfunc: return the average (smudge) color of
// the surface under a soft circular dab at (x, y) with the given radius.
// Outputs are straight (un-premultiplied) RGBA in [0, 1].
void get_color (MyPaintSurface *surface, float x, float y,
                  float radius,
                  float * color_r, float * color_g, float * color_b, float * color_a
                  )
{
    MyPaintTiledSurface *self = (MyPaintTiledSurface *)surface;

    if (radius < 1.0f) radius = 1.0f;
    const float hardness = 0.5f;
    const float aspect_ratio = 1.0f;
    const float angle = 0.0f;

    float sum_weight, sum_r, sum_g, sum_b, sum_a;
    sum_weight = sum_r = sum_g = sum_b = sum_a = 0.0f;

    // in case we return with an error
    *color_r = 0.0f;
    *color_g = 1.0f;
    *color_b = 0.0f;

    // WARNING: some code duplication with draw_dab
    float r_fringe = radius + 1.0f; // +1 should not be required, only to be sure

    int tx1 = floor(floor(x - r_fringe) / MYPAINT_TILE_SIZE);
    int tx2 = floor(floor(x + r_fringe) / MYPAINT_TILE_SIZE);
    int ty1 = floor(floor(y - r_fringe) / MYPAINT_TILE_SIZE);
    int ty2 = floor(floor(y + r_fringe) / MYPAINT_TILE_SIZE);
#ifdef _OPENMP
    // BUGFIX: the tile ranges are inclusive, so the count needs +1 per
    // axis. The old (tx2-tx1)*(ty2-ty1) undercounted (e.g. 0 for a 1x1
    // region) and could wrongly disable the parallel path.
    int tiles_n = (tx2 - tx1 + 1) * (ty2 - ty1 + 1);
#endif

    #pragma omp parallel for schedule(static) if(self->threadsafe_tile_requests && tiles_n > 3)
    for (int ty = ty1; ty <= ty2; ty++) {
        for (int tx = tx1; tx <= tx2; tx++) {
            // Flush queued draw_dab operations so we read up-to-date pixels
            process_tile(self, tx, ty);

            MyPaintTileRequest request_data;
            const int mipmap_level = 0;
            mypaint_tile_request_init(&request_data, mipmap_level, tx, ty, TRUE);

            mypaint_tiled_surface_tile_request_start(self, &request_data);
            uint16_t * rgba_p = request_data.buffer;
            if (!rgba_p) {
                printf("Warning: Unable to get tile!\n");
                // BUGFIX: every tile_request_start must be paired with a
                // tile_request_end, even on failure.
                mypaint_tiled_surface_tile_request_end(self, &request_data);
                break;
            }

            // first, we calculate the mask (opacity for each pixel)
            uint16_t mask[MYPAINT_TILE_SIZE*MYPAINT_TILE_SIZE+2*MYPAINT_TILE_SIZE];
            render_dab_mask(mask,
                            x - tx*MYPAINT_TILE_SIZE,
                            y - ty*MYPAINT_TILE_SIZE,
                            radius,
                            hardness,
                            aspect_ratio, angle
                            );

            // The accumulators are shared between the worker threads.
            // TODO: try atomic operations instead
            #pragma omp critical
            {
                get_color_pixels_accumulate (mask, rgba_p,
                                             &sum_weight, &sum_r, &sum_g, &sum_b, &sum_a);
            }

            mypaint_tiled_surface_tile_request_end(self, &request_data);
        }
    }

    assert(sum_weight > 0.0f);
    sum_a /= sum_weight;
    sum_r /= sum_weight;
    sum_g /= sum_weight;
    sum_b /= sum_weight;

    *color_a = sum_a;
    // now un-premultiply the alpha
    if (sum_a > 0.0f) {
        *color_r = sum_r / sum_a;
        *color_g = sum_g / sum_a;
        *color_b = sum_b / sum_a;
    } else {
        // it is all transparent, so don't care about the colors
        // (let's make them ugly so bugs will be visible)
        *color_r = 0.0f;
        *color_g = 1.0f;
        *color_b = 0.0f;
    }

    // fix rounding problems that do happen due to floating point math
    *color_r = CLAMP(*color_r, 0.0f, 1.0f);
    *color_g = CLAMP(*color_g, 0.0f, 1.0f);
    *color_b = CLAMP(*color_b, 0.0f, 1.0f);
    *color_a = CLAMP(*color_a, 0.0f, 1.0f);
}
/**
 * mypaint_tiled_surface_init: (skip)
 *
 * Initialize the surface, passing in implementations of the tile backend.
 * Note: Only intended to be called from subclasses of #MyPaintTiledSurface
 **/
void
mypaint_tiled_surface_init(MyPaintTiledSurface *self,
                           MyPaintTileRequestStartFunction tile_request_start,
                           MyPaintTileRequestEndFunction tile_request_end)
{
    /* Chain up: set up the MyPaintSurface part and install our vfuncs. */
    mypaint_surface_init(&self->parent);
    self->parent.draw_dab = draw_dab;
    self->parent.get_color = get_color;
    self->parent.begin_atomic = begin_atomic_default;
    self->parent.end_atomic = end_atomic_default;

    /* Tile backend supplied by the subclass. */
    self->tile_request_start = tile_request_start;
    self->tile_request_end = tile_request_end;
    self->tile_size = MYPAINT_TILE_SIZE;
    /* Conservative default: assume the backend's tile requests are not
     * safe to issue from multiple threads until told otherwise. */
    self->threadsafe_tile_requests = FALSE;

    /* Empty dirty region. */
    self->dirty_bbox.x = 0;
    self->dirty_bbox.y = 0;
    self->dirty_bbox.width = 0;
    self->dirty_bbox.height = 0;

    /* Symmetry painting is off by default. */
    self->surface_do_symmetry = FALSE;
    self->surface_center_x = 0.0f;

    self->operation_queue = operation_queue_new();
}
/**
 * mypaint_tiled_surface_destroy: (skip)
 *
 * Deallocate resources set up by mypaint_tiled_surface_init()
 * Does not free the #MyPaintTiledSurface itself.
 * Note: Only intended to be called from subclasses of #MyPaintTiledSurface
 */
void
mypaint_tiled_surface_destroy(MyPaintTiledSurface *self)
{
/* The operation queue is the only resource this base class owns. */
operation_queue_free(self->operation_queue);
}
|
c55c7aec73df0f31d67fbe39510946453b899e1d.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
/* Array descriptor passed in by the host framework: a raw data pointer
 * plus per-dimension metadata. Only `data` and `size` are read in this
 * file; the remaining fields (presumably padded sizes, halo sizes and
 * offsets -- TODO confirm against the code generator) are part of the
 * external ABI and must not be reordered. */
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
/* Wall-clock time accumulators (seconds), one per generated code section;
 * filled in by Forward() via gettimeofday(). */
struct profiler
{
double section0;
double section1;
double section2;
} ;
/*
 * Forward: machine-generated time-stepping kernel (NOTE(review): this file
 * looks auto-generated -- regenerate rather than hand-edit if possible).
 *
 * Advances the wavefield `u` from time_m to time_M using three time levels
 * cycled modulo 3 (t0 = current, t1 = next, t2 = previous), injects the
 * source term into u (section1) and samples u at the receiver coordinates
 * into `rec` (section2). Per-section wall-clock time is accumulated into
 * `timers`. All grid arrays are mapped to the OpenMP target device up
 * front; `rec` and `u` are copied back to the host at the end.
 */
int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers)
{
/* Reinterpret the flat dataobj buffers as multi-dimensional arrays. */
float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data;
float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data;
float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data;
float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;
/* One-time host -> device transfers. */
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
/* Time loop; the three wavefield buffers rotate via t0/t1/t2 (mod 3). */
for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3))
{
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
/* Stencil update: write the next time level u[t1] from a high-order
 * finite-difference combination of u[t0] neighbors (offsets +/-6 around
 * the 12-point halo), a damping term, and the previous level u[t2]. */
#pragma omp target teams distribute parallel for collapse(3)
for (int x = x_m; x <= x_M; x += 1)
{
for (int y = y_m; y <= y_M; y += 1)
{
for (int z = z_m; z <= z_M; z += 1)
{
float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12];
u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1);
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
/* Source injection: each source point is spread onto the 8 corners of
 * its enclosing grid cell with trilinear weights (px, py, pz); atomic
 * updates guard against sources sharing a cell. */
#pragma omp target teams distribute parallel for collapse(1)
for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1)
{
int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0]));
int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1]));
int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2]));
int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1;
int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1;
int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1;
float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]);
float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]);
float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]);
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1)
{
float r1 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1;
}
if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1)
{
float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2;
}
if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3;
}
if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1)
{
float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4;
}
if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += r5;
}
if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1)
{
float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6;
}
if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7;
}
if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1)
{
float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src];
#pragma omp atomic update
u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8;
}
}
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000;
struct timeval start_section2, end_section2;
gettimeofday(&start_section2, NULL);
/* Begin section2 */
/* Receiver sampling: interpolate u[t0] trilinearly at each receiver
 * coordinate (mirror of the injection weights above) into rec. */
#pragma omp target teams distribute parallel for collapse(1)
for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
{
int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0]));
int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1]));
int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2]));
int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1;
int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1;
int ii_rec_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1;
float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]);
float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]);
float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]);
float sum = 0.0F;
if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
}
if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
}
if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12];
}
if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_3 + 12];
}
if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12];
}
if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1)
{
sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12];
}
rec[time][p_rec] = sum;
}
/* End section2 */
gettimeofday(&end_section2, NULL);
timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000;
}
/* Copy the results back to the host and release the device copies. */
#pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]])
#pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
#pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]])
#pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]])
#pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]])
return 0;
}
|
GB_unop__ainv_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_int16_int16
// op(A') function: GB_unop_tran__ainv_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = -z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the AINV (additive inverse) unary operator,
// negating every int16 entry of Ax into Cx.
GrB_Info GB_unop_apply__ainv_int16_int16
(
    int16_t *Cx,            // output array; may alias Ax
    const int16_t *Ax,      // input array
    int64_t anz,            // number of entries to process
    int nthreads            // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // negate each entry; int promotion then truncation back to int16_t
    // matches the original two-temporary formulation exactly
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (int16_t) (-Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, then negate each int16 entry.
   The actual traversal lives in the included template
   GB_unop_transpose.c, which is driven by the GB_CAST_OP / GB_ATYPE /
   GB_CTYPE macros defined earlier in this file.  Returns GrB_NO_VALUE
   when the operator/type combination is compiled out via GB_DISABLE. */
GrB_Info GB_unop_tran__ainv_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
/* second (numeric) phase of the two-phase transpose template */
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
QLA_D3_V_vpeq_M_times_pV.c | /**************** QLA_D3_V_vpeq_M_times_pV.c ********************/
#include <stdio.h>
#include <qla_config.h>
#include <qla_types.h>
#include <qla_random.h>
#include <qla_cmath.h>
#include <qla_d3.h>
#include <math.h>
/* Empty volatile asm: acts as a compiler-level optimization barrier,
   preventing the compiler from eliminating or reordering across the
   call.  Presumably a begin-marker for profiling the (commented-out)
   kernel below -- TODO confirm intent with the QLA build setup. */
static void start_slice(){
__asm__ __volatile__ ("");
}
/* Empty volatile asm barrier; presumably the matching end-marker for
   start_slice() above -- TODO confirm. */
static void end_slice(){
__asm__ __volatile__ ("");
}
/*
void QLA_D3_V_vpeq_M_times_pV ( QLA_D3_ColorVector *restrict r, QLA_D3_ColorMatrix *restrict a, QLA_D3_ColorVector *restrict *b, int n)
{
// start_slice();
#ifdef HAVE_XLC
#pragma disjoint(*r,*a,**b)
__alignx(16,r);
__alignx(16,a);
#endif
#pragma omp parallel for
for(int i=0; i<n; i++) {
#ifdef HAVE_XLC
__alignx(16,b[i]);
#endif
for(int i_c=0; i_c<3; i_c++) {
QLA_D_Complex x;
QLA_c_eq_c(x,QLA_D3_elem_V(r[i],i_c));
for(int k_c=0; k_c<3; k_c++) {
QLA_c_peq_c_times_c(x, QLA_D3_elem_M(a[i],i_c,k_c), QLA_D3_elem_V(*b[i],k_c));
}
QLA_c_eq_c(QLA_D3_elem_V(r[i],i_c),x);
}
}
// end_slice();
}
*/
|
Example_lastprivate.1.c | /*
* @@name: lastprivate.1c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
*/
/* OpenMP lastprivate example: a[0..n-2] = b[i] + b[i+1] in parallel,
 * then the sequentially-last loop value (idx == n-1 after the final
 * increment) is used to finish the last element serially. */
void lastpriv (int n, float *a, float *b)
{
  int idx;
  #pragma omp parallel
  {
    /* lastprivate(idx): after the worksharing loop, idx carries the
       value from the sequentially last iteration. */
    #pragma omp for lastprivate(idx)
    for (idx = 0; idx < n - 1; idx++)
      a[idx] = b[idx] + b[idx + 1];
  }
  a[idx] = b[idx]; /* idx == n-1 here */
}
|
GB_unop__log2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__log2_fp64_fp64)
// op(A') function: GB (_unop_tran__log2_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log2 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = log2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply cij = log2 (aij) over a full or bitmap array.
GrB_Info GB (_unop_apply__log2_fp64_fp64)
(
    double *Cx,                 // output array; may alias Ax
    const double *Ax,           // input array
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL if full
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab == NULL)
    {
        // full case: every position holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            Cx [k] = log2 (Ax [k]) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only positions flagged in Ab are computed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = log2 (Ax [k]) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A and apply cij = log2 (aij).
   The traversal is supplied by the GB_unop_transpose.c template,
   parameterized by the GB_CAST_OP / GB_ATYPE / GB_CTYPE macros defined
   earlier in this file.  Returns GrB_NO_VALUE when compiled out. */
GrB_Info GB (_unop_tran__log2_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp3-3.c | #include<stdio.h>
#ifndef N
#define N 5000
#endif
#define M 1000000000
int a[N][N], b[N][N];
int main() {
int i, j, sum;
#pragma omp parallel sections
{
#pragma omp section
{
int i, j;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
a[i][j] = i + j;
}
#pragma omp section
{
int i, j;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
b[i][j] = i - j;
}
}
sum = 0;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++) {
sum += a[i][j];
sum %= M;
}
printf("%d\n", sum);
sum = 0;
for (i = 0; i < N; i++)
for (j = 0; j < N; j++) {
sum += b[i][j];
sum %= M;
}
printf("%d\n", sum);
return 0;
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y, normalized so that result->tv_usec lands
 * in [0, 1000000).  NOTE: *y is modified during normalization (as in
 * the classic GNU libc manual example).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds into y in the opposite direction. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Order-8 (25-point) 3D wave stencil benchmark driver.
 * Usage: prog Nx Ny Nz [Nt]
 * Fixes applied:
 *  - Nx/Ny/Nz/Nt were read uninitialized (UB) when too few arguments
 *    were given; safe defaults are now provided.
 *  - roc2 was allocated twice; the first (wrong-sized) allocation leaked.
 *  - initialization loops started at index 1, leaving plane 0
 *    uninitialized although the stencil reads it (i-4 == 0 at i == 4),
 *    and A[1] was read on the first timestep before ever being written.
 *  - tile_size and the top-level A pointer array were never freed. */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  /* Interior size plus 8 ghost layers per axis; defaults keep the run
   * well-defined when arguments are missing. */
  int Nx = 40, Ny = 40, Nz = 40, Nt = 4;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  /* Double-buffered field A[2][Nz][Ny][Nx] and coefficients roc2[Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2 = (double ***) malloc(sizeof(double**)*Nz);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }
  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 1024;
  tile_size[4] = -1;
  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;
  const int BASE = 1024;
  /* Reproducible pseudo-random initialization of the FULL domain
   * (index 0 included) for both time buffers. */
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  /* order-8 central-difference stencil coefficients */
  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;
  for (test = 0; test < TESTS; test++) {
    gettimeofday(&start, 0);
    /* serial execution - Addition: 6 && Multiplication: 2 */
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i ][j ][k ] +
                coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
                       A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
                       A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
                coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
                       A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
                       A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
                coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
                       A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
                       A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
                coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
                       A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
                       A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    /* NOTE: timeval_subtract normalizes (mutates) its y argument. */
    ts_return = timeval_subtract(&result, &end, &start);
    (void) ts_return;
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  /* Free all allocations (including tile_size and A, formerly leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);
  return 0;
}
|
params.c | /*
c Ivo Hofacker
Vienna RNA package
*/
#include <config.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "energy_par.h"
#include "fold_vars.h"
#include "utils.h"
#include "params.h"
/**
*** \file params.c
*** <P>
*** This file provides functions that return temperature scaled energy parameters and
*** Boltzmann weights packed in datastructures
*** </P>
***/
/*@unused@*/
static char rcsid[] UNUSED = "$Id: params.c,v 1.9 2008/07/04 14:29:14 ivo Exp $";
/* cache holding the most recently set/scaled energy parameter set */
PRIVATE paramT p;
PRIVATE int id=-1; /* generation counter: p is stale when p.id != id */
/* variables for partition function */
PRIVATE pf_paramT pf;
PRIVATE int pf_id=-1; /* generation counter for the pf cache */
#ifdef _OPENMP
/* NOTE(review): only the generation counters are threadprivate while
   the caches p/pf themselves stay shared -- confirm this is intended;
   concurrent set_parameters()/copy_parameters() could race on p/pf. */
#pragma omp threadprivate(id, pf_id)
#endif
/* Convenience wrapper: scale the energy parameters to the current
   global `temperature` using the default model details. */
PUBLIC paramT *scale_parameters(void){
  model_detailsT details;
  set_model_details(&details);
  return get_scaled_parameters(temperature, details);
}
/* Allocate and fill a paramT with free energies rescaled to `temp`
   using the standard linear enthalpy/entropy interpolation
   G(T) = H - (H - G(T0)) * T/T0, where tempf = T/T0 in Kelvin ratio.
   All *dH / *37 tables are global parameter tables from energy_par.h.
   Caller owns (and must free) the returned structure. */
PUBLIC paramT *get_scaled_parameters( double temp,
model_detailsT md){
unsigned int i,j,k,l;
double tempf;
paramT *params;
params = (paramT *)space(sizeof(paramT));
/* store the model details */
params->model_details = md;
params->temperature = temp;
tempf = ((params->temperature+K0)/Tmeasure);
/* G-quadruplex energies */
for(i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
for(j = 3*VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3*VRNA_GQUAD_MAX_LINKER_LENGTH; j++){
double GQuadAlpha_T = (double)GQuadAlphadH - (double)(GQuadAlphadH - GQuadAlpha37) * tempf;
double GQuadBeta_T = (double)GQuadBetadH - (double)(GQuadBetadH - GQuadBeta37) * tempf;
/* NOTE(review): `(int)GQuadAlpha_T*(i-1)` truncates alpha BEFORE the
   multiply (cast binds tighter than *); the pf variant below keeps the
   product in double -- confirm the truncation point is intended. */
params->gquad[i][j] = (int)GQuadAlpha_T*(i-1) + (int)(((double)GQuadBeta_T)*log(j - 2));
}
/* hairpin/bulge/interior loop-length tables (tabulated up to 30) */
for (i=0; i<31; i++)
params->hairpin[i] = hairpindH[i] - (hairpindH[i] - hairpin37[i])*tempf;
for (i=0; i<=MIN2(30,MAXLOOP); i++) {
params->bulge[i] = bulgedH[i] - (bulgedH[i] - bulge37[i]) * tempf;
params->internal_loop[i] = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i]) * tempf;
}
params->lxc = lxc37*tempf;
/* logarithmic extrapolation for loops longer than 30 */
for (; i<=MAXLOOP; i++) {
params->bulge[i] = params->bulge[30]+(int)(params->lxc*log((double)(i)/30.));
params->internal_loop[i] = params->internal_loop[30]+(int)(params->lxc*log((double)(i)/30.));
}
params->ninio[2] = niniodH - (niniodH - ninio37) * tempf;
params->TripleC = TripleCdH - (TripleCdH - TripleC37) * tempf;
params->MultipleCA = MultipleCAdH - (MultipleCAdH - MultipleCA37) * tempf;
params->MultipleCB = MultipleCBdH - (MultipleCBdH - MultipleCB37) * tempf;
/* special-loop bonus tables; entries are 7/5/9 chars wide per motif */
for (i=0; (i*7)<strlen(Tetraloops); i++)
params->Tetraloop_E[i] = TetraloopdH[i] - (TetraloopdH[i]-Tetraloop37[i])*tempf;
for (i=0; (i*5)<strlen(Triloops); i++)
params->Triloop_E[i] = TriloopdH[i] - (TriloopdH[i]-Triloop37[i])*tempf;
for (i=0; (i*9)<strlen(Hexaloops); i++)
params->Hexaloop_E[i] = HexaloopdH[i] - (HexaloopdH[i]-Hexaloop37[i])*tempf;
params->TerminalAU = TerminalAUdH - (TerminalAUdH - TerminalAU37) * tempf;
params->DuplexInit = DuplexInitdH - (DuplexInitdH - DuplexInit37) *tempf;
params->MLbase = ML_BASEdH - (ML_BASEdH - ML_BASE37) * tempf;
for (i=0; i<=NBPAIRS; i++)
params->MLintern[i] = ML_interndH - (ML_interndH - ML_intern37) * tempf;
params->MLclosing = ML_closingdH - (ML_closingdH - ML_closing37) * tempf;
/* stacks G(T) = H - [H - G(T0)]*T/T0 */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
params->stack[i][j] = stackdH[i][j] - (stackdH[i][j] - stack37[i][j])*tempf;
/* mismatches */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<5; j++)
for (k=0; k<5; k++) {
int mm;
params->mismatchI[i][j][k] = mismatchIdH[i][j][k] - (mismatchIdH[i][j][k] - mismatchI37[i][j][k])*tempf;
params->mismatchH[i][j][k] = mismatchHdH[i][j][k] - (mismatchHdH[i][j][k] - mismatchH37[i][j][k])*tempf;
params->mismatch1nI[i][j][k] = mismatch1nIdH[i][j][k]-(mismatch1nIdH[i][j][k]-mismatch1nI37[i][j][k])*tempf;/* interior nx1 loops */
params->mismatch23I[i][j][k] = mismatch23IdH[i][j][k]-(mismatch23IdH[i][j][k]-mismatch23I37[i][j][k])*tempf;/* interior 2x3 loops */
if(md.dangles){
/* multiloop/exterior mismatches are clamped to <= 0 */
mm = mismatchMdH[i][j][k] - (mismatchMdH[i][j][k] - mismatchM37[i][j][k])*tempf;
params->mismatchM[i][j][k] = (mm > 0) ? 0 : mm;
mm = mismatchExtdH[i][j][k] - (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k])*tempf;
params->mismatchExt[i][j][k] = (mm > 0) ? 0 : mm;
}
else{
params->mismatchM[i][j][k] = params->mismatchExt[i][j][k] = 0;
}
}
/* dangles */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<5; j++) {
int dd;
dd = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j])*tempf;
params->dangle5[i][j] = (dd>0) ? 0 : dd; /* must be <= 0 */
dd = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j])*tempf;
params->dangle3[i][j] = (dd>0) ? 0 : dd; /* must be <= 0 */
}
/* interior 1x1 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++)
params->int11[i][j][k][l] = int11_dH[i][j][k][l] - (int11_dH[i][j][k][l] - int11_37[i][j][k][l])*tempf;
/* interior 2x1 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m;
for (m=0; m<5; m++)
params->int21[i][j][k][l][m] = int21_dH[i][j][k][l][m] - (int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m])*tempf;
}
/* interior 2x2 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m,n;
for (m=0; m<5; m++)
for (n=0; n<5; n++)
params->int22[i][j][k][l][m][n] = int22_dH[i][j][k][l][m][n] - (int22_dH[i][j][k][l][m][n]-int22_37[i][j][k][l][m][n])*tempf;
}
/* copy the motif strings; 281/241/361 are presumably the destination
   buffer sizes in paramT -- TODO confirm against params.h */
strncpy(params->Tetraloops, Tetraloops, 281);
strncpy(params->Triloops, Triloops, 241);
strncpy(params->Hexaloops, Hexaloops, 361);
params->id = ++id;
return params;
}
/*------------------------------------------------------------------------*/
#define SCALE 10
/**
*** dangling ends should never be destabilizing, i.e. expdangle>=1<BR>
*** specific heat needs smooth function (2nd derivative)<BR>
*** we use a*(sin(x+b)+1)^2, with a=2/(3*sqrt(3)), b=Pi/6-sqrt(3)/2,
*** in the interval b<x<sqrt(3)/2
***
*** BUG FIX: the expansion is now wrapped in one outer pair of
*** parentheses; previously `SMOOTH(x) * y` parsed as
*** `cond ? 0 : (... * y)` because ?: binds looser than `*`.
*** (Existing call sites happened to be unaffected only because the
*** 0 branch is absorbed by the following multiplication.)
*** NOTE: X is evaluated multiple times -- do not pass expressions
*** with side effects.
*/
#define SMOOTH(X) (((X)/SCALE<-1.2283697)?0:(((X)/SCALE>0.8660254)?(X):\
SCALE*0.38490018*(sin((X)/SCALE-0.34242663)+1)*(sin((X)/SCALE-0.34242663)+1)))
/* #define SMOOTH(X) ((X)<0 ? 0 : (X)) */
/* Convenience wrapper: Boltzmann factors for the current global
   `temperature` with default model details, betaScale = 1.0 and the
   global `pf_scale`. */
PUBLIC pf_paramT *get_scaled_pf_parameters(void){
  model_detailsT details;
  set_model_details(&details);
  return get_boltzmann_factors(temperature, 1.0, details, pf_scale);
}
/* Allocate and fill a pf_paramT with Boltzmann weights exp(-G(T)*10/kT)
   for every energy contribution, where G(T) = H - (H - G(T0))*T/T0 and
   kT = betaScale*(temp+K0)*GASCONST (cal/mol; the *10 converts the
   dekacal/mol tables).  Caller owns the returned structure.
   Note: the local `pf` intentionally shadows the file-scope cache `pf`. */
PUBLIC pf_paramT *get_boltzmann_factors(double temp,
double betaScale,
model_detailsT md,
double pf_scale){
unsigned int i, j, k, l;
double kT, TT;
double GT;
pf_paramT *pf;
pf = (pf_paramT *)space(sizeof(pf_paramT));
pf->model_details = md;
pf->temperature = temp;
pf->alpha = betaScale;
pf->kT = kT = betaScale*(temp+K0)*GASCONST; /* kT in cal/mol */
pf->pf_scale = pf_scale;
TT = (temp+K0)/(Tmeasure);
/* G-quadruplex weights */
for(i = VRNA_GQUAD_MIN_STACK_SIZE; i <= VRNA_GQUAD_MAX_STACK_SIZE; i++)
for(j = 3*VRNA_GQUAD_MIN_LINKER_LENGTH; j <= 3*VRNA_GQUAD_MAX_LINKER_LENGTH; j++){
double GQuadAlpha_T = (double)GQuadAlphadH - (double)(GQuadAlphadH - GQuadAlpha37) * TT;
double GQuadBeta_T = (double)GQuadBetadH - (double)(GQuadBetadH - GQuadBeta37) * TT;
GT = ((double)GQuadAlpha_T)*((double)(i-1)) + ((double)GQuadBeta_T)*log(((double)j) - 2.);
pf->expgquad[i][j] = exp( -GT*10./kT);
}
/* loop energies: hairpins, bulges, interior, mulit-loops */
for (i=0; i<31; i++){
GT = hairpindH[i] - (hairpindH[i] - hairpin37[i])*TT;
pf->exphairpin[i] = exp( -GT*10./kT);
}
for (i=0; i<=MIN2(30, MAXLOOP); i++) {
GT = bulgedH[i]- (bulgedH[i] - bulge37[i])*TT;
pf->expbulge[i] = exp( -GT*10./kT);
GT = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i])*TT;
pf->expinternal[i] = exp( -GT*10./kT);
}
/* special case of size 2 interior loops (single mismatch) */
if (james_rule) pf->expinternal[2] = exp ( -80*10./kT);
pf->lxc = lxc37*TT;
GT = DuplexInitdH - (DuplexInitdH - DuplexInit37)*TT;
pf->expDuplexInit = exp( -GT*10./kT);
/* logarithmic extrapolation beyond loop size 30 */
for (i=31; i<=MAXLOOP; i++) {
/* NOTE(review): uses the 37C value scaled by TT here, whereas the
   paramT version extrapolates from the already temperature-scaled
   bulge[30]/internal_loop[30] -- confirm the asymmetry is intended. */
GT = bulge37[30]*TT + (pf->lxc*log( i/30.));
pf->expbulge[i] = exp( -GT*10./kT);
GT = internal_loop37[30]*TT + (pf->lxc*log( i/30.));
pf->expinternal[i] = exp( -GT*10./kT);
}
GT = niniodH - (niniodH - ninio37)*TT;
for (j=0; j<=MAXLOOP; j++)
pf->expninio[2][j]=exp(-MIN2(MAX_NINIO,j*GT)*10./kT);
for (i=0; (i*7)<strlen(Tetraloops); i++) {
GT = TetraloopdH[i] - (TetraloopdH[i]-Tetraloop37[i])*TT;
pf->exptetra[i] = exp( -GT*10./kT);
}
for (i=0; (i*5)<strlen(Triloops); i++) {
GT = TriloopdH[i] - (TriloopdH[i]-Triloop37[i])*TT;
pf->exptri[i] = exp( -GT*10./kT);
}
for (i=0; (i*9)<strlen(Hexaloops); i++) {
GT = HexaloopdH[i] - (HexaloopdH[i]-Hexaloop37[i])*TT;
pf->exphex[i] = exp( -GT*10./kT);
}
GT = ML_closingdH - (ML_closingdH - ML_closing37)*TT;
pf->expMLclosing = exp( -GT*10./kT);
for (i=0; i<=NBPAIRS; i++) {
GT = ML_interndH - (ML_interndH - ML_intern37)*TT;
/* if (i>2) GT += TerminalAU; */
pf->expMLintern[i] = exp( -GT*10./kT);
}
GT = TerminalAUdH - (TerminalAUdH - TerminalAU37)*TT;
pf->expTermAU = exp(-GT*10./kT);
GT = ML_BASEdH - (ML_BASEdH - ML_BASE37)*TT;
pf->expMLbase=exp(-10.*GT/kT);
/* if dangles==0 just set their energy to 0,
don't let dangle energies become > 0 (at large temps),
but make sure go smoothly to 0 */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=4; j++) {
if (md.dangles) {
/* SMOOTH clamps destabilizing dangles smoothly towards 0 */
GT = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j])*TT;
pf->expdangle5[i][j] = exp(SMOOTH(-GT)*10./kT);
GT = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j])*TT;
pf->expdangle3[i][j] = exp(SMOOTH(-GT)*10./kT);
} else
pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
}
/* stacking energies */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++) {
GT = stackdH[i][j] - (stackdH[i][j] - stack37[i][j])*TT;
pf->expstack[i][j] = exp( -GT*10./kT);
}
/* mismatch energies */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<5; j++)
for (k=0; k<5; k++) {
GT = mismatchIdH[i][j][k] - ( mismatchIdH[i][j][k] - mismatchI37[i][j][k])*TT;
pf->expmismatchI[i][j][k] = exp(-GT*10.0/kT);
GT = mismatch1nIdH[i][j][k] - (mismatch1nIdH[i][j][k] - mismatch1nI37[i][j][k])*TT;
pf->expmismatch1nI[i][j][k] = exp(-GT*10.0/kT);
GT = mismatchHdH[i][j][k] - (mismatchHdH[i][j][k] - mismatchH37[i][j][k])*TT;
pf->expmismatchH[i][j][k] = exp(-GT*10.0/kT);
if (md.dangles) {
GT = mismatchMdH[i][j][k] - (mismatchMdH[i][j][k] - mismatchM37[i][j][k])*TT;
pf->expmismatchM[i][j][k] = exp(SMOOTH(-GT)*10.0/kT);
GT = mismatchExtdH[i][j][k] - (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k])*TT;
pf->expmismatchExt[i][j][k] = exp(SMOOTH(-GT)*10.0/kT);
}
else{
pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
}
GT = mismatch23IdH[i][j][k] - (mismatch23IdH[i][j][k] - mismatch23I37[i][j][k])*TT;
pf->expmismatch23I[i][j][k] = exp(-GT*10.0/kT);
}
/* interior lops of length 2 */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
GT = int11_dH[i][j][k][l] -
(int11_dH[i][j][k][l] - int11_37[i][j][k][l])*TT;
pf->expint11[i][j][k][l] = exp(-GT*10./kT);
}
/* interior 2x1 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m;
for (m=0; m<5; m++) {
GT = int21_dH[i][j][k][l][m] -
(int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m])*TT;
pf->expint21[i][j][k][l][m] = exp(-GT*10./kT);
}
}
/* interior 2x2 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m,n;
for (m=0; m<5; m++)
for (n=0; n<5; n++) {
GT = int22_dH[i][j][k][l][m][n] -
(int22_dH[i][j][k][l][m][n]-int22_37[i][j][k][l][m][n])*TT;
pf->expint22[i][j][k][l][m][n] = exp(-GT*10./kT);
}
}
strncpy(pf->Tetraloops, Tetraloops, 281);
strncpy(pf->Triloops, Triloops, 241);
strncpy(pf->Hexaloops, Hexaloops, 361);
return pf;
}
/* Convenience wrapper for the alignment (consensus) variant: Boltzmann
   factors for `n_seq` aligned sequences at the current global
   `temperature`, default model details, betaScale = 1.0 and the global
   `pf_scale`. */
PUBLIC pf_paramT *get_scaled_alipf_parameters(unsigned int n_seq){
  model_detailsT details;
  set_model_details(&details);
  return get_boltzmann_factors_ali(n_seq, temperature, 1.0, details, pf_scale);
}
/* Alignment (consensus-structure) variant of get_boltzmann_factors():
   identical scaling scheme, but kTn absorbs a factor n_seq so that
   energies summed over all sequences of the alignment use the right
   temperature weight.  Also adds a penalty for hairpins shorter than 3.
   Caller owns the returned structure. */
PUBLIC pf_paramT *get_boltzmann_factors_ali(unsigned int n_seq,
double temperature,
double betaScale,
model_detailsT md,
double pf_scale){
/* scale energy parameters and pre-calculate Boltzmann weights */
unsigned int i, j, k, l;
double kTn, TT;
double GT;
pf_paramT *pf;
pf = (pf_paramT *)space(sizeof(pf_paramT));
pf->model_details = md;
pf->alpha = betaScale;
pf->temperature = temperature;
pf->pf_scale = pf_scale;
pf->kT = kTn = ((double)n_seq)*betaScale*(temperature+K0)*GASCONST; /* kT in cal/mol */
TT = (temperature+K0)/(Tmeasure);
/* loop energies: hairpins, bulges, interior, mulit-loops */
for (i=0; i<31; i++) {
GT = hairpindH[i] - (hairpindH[i] - hairpin37[i])*TT;
pf->exphairpin[i] = exp( -GT*10./kTn);
}
/*add penalty for too short hairpins*/
for (i=0; i<3; i++) {
GT= 600/*Penalty*/*TT;
pf->exphairpin[i] = exp( -GT*10./kTn);
}
for (i=0; i<=MIN2(30, MAXLOOP); i++) {
GT = bulgedH[i]- (bulgedH[i] - bulge37[i])*TT;
pf->expbulge[i] = exp( -GT*10./kTn);
GT = internal_loopdH[i] - (internal_loopdH[i] - internal_loop37[i])*TT;
pf->expinternal[i] = exp( -GT*10./kTn);
}
/* special case of size 2 interior loops (single mismatch) */
if (james_rule) pf->expinternal[2] = exp ( -80*10./kTn);
pf->lxc = lxc37*TT;
GT = DuplexInitdH - (DuplexInitdH - DuplexInit37)*TT;
pf->expDuplexInit = exp( -GT*10./kTn);
/* logarithmic extrapolation beyond loop size 30; same TT-scaling
   asymmetry as in get_boltzmann_factors() above */
for (i=31; i<=MAXLOOP; i++) {
GT = bulge37[30]*TT + (pf->lxc*log( i/30.));
pf->expbulge[i] = exp( -GT*10./kTn);
GT = internal_loop37[30]*TT + (pf->lxc*log( i/30.));
pf->expinternal[i] = exp( -GT*10./kTn);
}
GT = niniodH - (niniodH - ninio37)*TT;
for (j=0; j<=MAXLOOP; j++)
pf->expninio[2][j]=exp(-MIN2(MAX_NINIO,j*GT)*10./kTn);
for (i=0; (i*7)<strlen(Tetraloops); i++) {
GT = TetraloopdH[i] - (TetraloopdH[i]-Tetraloop37[i])*TT;
pf->exptetra[i] = exp( -GT*10./kTn);
}
for (i=0; (i*5)<strlen(Triloops); i++) {
GT = TriloopdH[i] - (TriloopdH[i]-Triloop37[i])*TT;
pf->exptri[i] = exp( -GT*10./kTn);
}
for (i=0; (i*9)<strlen(Hexaloops); i++) {
GT = HexaloopdH[i] - (HexaloopdH[i]-Hexaloop37[i])*TT;
pf->exphex[i] = exp( -GT*10./kTn);
}
GT = ML_closingdH - (ML_closingdH - ML_closing37)*TT;
pf->expMLclosing = exp( -GT*10./kTn);
for (i=0; i<=NBPAIRS; i++) { /* includes AU penalty */
GT = ML_interndH - (ML_interndH - ML_intern37)*TT;
/* if (i>2) GT += TerminalAU; */
pf->expMLintern[i] = exp( -GT*10./kTn);
}
GT = TerminalAUdH - (TerminalAUdH - TerminalAU37)*TT;
pf->expTermAU = exp(-GT*10./kTn);
GT = ML_BASEdH - (ML_BASEdH - ML_BASE37)*TT;
/* per-base multiloop term uses kT per single sequence (kTn/n_seq) */
pf->expMLbase=exp(-10.*GT/(kTn/n_seq));
/* if dangle_model==0 just set their energy to 0,
don't let dangle energies become > 0 (at large temps),
but make sure go smoothly to 0 */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=4; j++) {
if (md.dangles) {
GT = dangle5_dH[i][j] - (dangle5_dH[i][j] - dangle5_37[i][j])*TT;
pf->expdangle5[i][j] = exp(SMOOTH(-GT)*10./kTn);
GT = dangle3_dH[i][j] - (dangle3_dH[i][j] - dangle3_37[i][j])*TT;
pf->expdangle3[i][j] = exp(SMOOTH(-GT)*10./kTn);
} else
pf->expdangle3[i][j] = pf->expdangle5[i][j] = 1;
}
/* stacking energies */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++) {
GT = stackdH[i][j] - (stackdH[i][j] - stack37[i][j])*TT;
pf->expstack[i][j] = exp( -GT*10./kTn);
}
/* mismatch energies */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<5; j++)
for (k=0; k<5; k++) {
GT = mismatchIdH[i][j][k] - ( mismatchIdH[i][j][k] - mismatchI37[i][j][k])*TT;
pf->expmismatchI[i][j][k] = exp(-GT*10.0/kTn);
GT = mismatch1nIdH[i][j][k] - (mismatch1nIdH[i][j][k] - mismatch1nI37[i][j][k])*TT;
pf->expmismatch1nI[i][j][k] = exp(-GT*10.0/kTn);
GT = mismatchHdH[i][j][k] - (mismatchHdH[i][j][k] - mismatchH37[i][j][k])*TT;
pf->expmismatchH[i][j][k] = exp(-GT*10.0/kTn);
if (md.dangles) {
GT = mismatchMdH[i][j][k] - (mismatchMdH[i][j][k] - mismatchM37[i][j][k])*TT;
pf->expmismatchM[i][j][k] = exp(SMOOTH(-GT)*10.0/kTn);
GT = mismatchExtdH[i][j][k] - (mismatchExtdH[i][j][k] - mismatchExt37[i][j][k])*TT;
pf->expmismatchExt[i][j][k] = exp(SMOOTH(-GT)*10.0/kTn);
}
else{
pf->expmismatchM[i][j][k] = pf->expmismatchExt[i][j][k] = 1.;
}
GT = mismatch23IdH[i][j][k] - (mismatch23IdH[i][j][k] - mismatch23I37[i][j][k])*TT;
pf->expmismatch23I[i][j][k] = exp(-GT*10.0/kTn);
}
/* interior lops of length 2 */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
GT = int11_dH[i][j][k][l] -
(int11_dH[i][j][k][l] - int11_37[i][j][k][l])*TT;
pf->expint11[i][j][k][l] = exp(-GT*10./kTn);
}
/* interior 2x1 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m;
for (m=0; m<5; m++) {
GT = int21_dH[i][j][k][l][m] -
(int21_dH[i][j][k][l][m] - int21_37[i][j][k][l][m])*TT;
pf->expint21[i][j][k][l][m] = exp(-GT*10./kTn);
}
}
/* interior 2x2 loops */
for (i=0; i<=NBPAIRS; i++)
for (j=0; j<=NBPAIRS; j++)
for (k=0; k<5; k++)
for (l=0; l<5; l++) {
int m,n;
for (m=0; m<5; m++)
for (n=0; n<5; n++) {
GT = int22_dH[i][j][k][l][m][n] -
(int22_dH[i][j][k][l][m][n]-int22_37[i][j][k][l][m][n])*TT;
pf->expint22[i][j][k][l][m][n] = exp(-GT*10./kTn);
}
}
strncpy(pf->Tetraloops, Tetraloops, 281);
strncpy(pf->Triloops, Triloops, 241);
strncpy(pf->Hexaloops, Hexaloops, 361);
return pf;
}
/* Deep-copy a pf_paramT record; returns NULL when given NULL.
   Caller owns (and must free) the returned copy. */
PUBLIC pf_paramT *get_boltzmann_factor_copy(pf_paramT *par){
  if(!par) return NULL;
  pf_paramT *dup = (pf_paramT *) space(sizeof(pf_paramT));
  memcpy(dup, par, sizeof(pf_paramT));
  return dup;
}
/* Deep-copy a paramT record; returns NULL when given NULL.
   Caller owns (and must free) the returned copy. */
PUBLIC paramT *get_parameter_copy(paramT *par){
  if(!par) return NULL;
  paramT *dup = (paramT *) space(sizeof(paramT));
  memcpy(dup, par, sizeof(paramT));
  return dup;
}
/*###########################################*/
/*# deprecated functions below #*/
/*###########################################*/
/* Deprecated: return a copy of the cached energy parameter set,
   recomputing it first when the cache is out of date (p.id != id). */
PUBLIC paramT *copy_parameters(void){
  if (p.id != id) return scale_parameters();
  paramT *dup = (paramT *) space(sizeof(paramT));
  memcpy(dup, &p, sizeof(paramT));
  return dup;
}
/* Deprecated: overwrite the global parameter cache `p` with *dest and
   return a pointer to the cache itself (NOT a copy). */
PUBLIC paramT *set_parameters(paramT *dest){
memcpy(&p, dest, sizeof(paramT));
return &p;
}
/* Deprecated: return a copy of the cached Boltzmann-factor set,
 * recomputing it first when the cache is out of date (pf.id != pf_id).
 * Improvement: removed the unused local `new`, which was never
 * assigned and is also a C++ keyword (a portability hazard). */
PUBLIC pf_paramT *copy_pf_param(void){
pf_paramT *copy;
if (pf.id != pf_id) return get_scaled_pf_parameters();
else{
copy = (pf_paramT *) space(sizeof(pf_paramT));
memcpy(copy, &pf, sizeof(pf_paramT));
}
return copy;
}
/* Deprecated: overwrite the global pf cache `pf` and return a pointer
   to the cache itself (NOT a copy).
   NOTE(review): the parameter is declared `paramT *` while
   sizeof(pf_paramT) bytes are copied from it -- if a caller really
   passes a paramT, this over-reads the source buffer; the prototype
   presumably should take `pf_paramT *` -- confirm against params.h. */
PUBLIC pf_paramT *set_pf_param(paramT *dest){
memcpy(&pf, dest, sizeof(pf_paramT));
return &pf;
}
/* Deprecated alias for get_scaled_pf_parameters(). */
PUBLIC pf_paramT *scale_pf_parameters(void){
return get_scaled_pf_parameters();
}
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2014 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "ggc.h"
#include "function.h"
#include "hashtab.h"
#include "vec.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
TREE_INDIRECT_USING (in NAMESPACE_DECL).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
DECL_GNU_TLS_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD and OMP_DISTRIBUTE)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
OVL_ARG_DEPENDENT (in OVERLOAD)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_RECHECK_ACCESS_P (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
1: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
2: IDENTIFIER_OPNAME_P (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPE_NAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
IDENTIFIER_CTOR_OR_DTOR_P (in IDENTIFIER_NODE)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NON_TRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_LIST_INIT_P (in CALL_EXPR, AGGR_INIT_EXPR)
4: TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
or FIELD_DECL).
IDENTIFIER_TYPENAME_P (in IDENTIFIER_NODE)
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
5: C_IS_RESERVED_WORD (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: unused
3: TYPE_FOR_JAVA.
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For an ENUMERAL_TYPE, this is ENUM_TEMPLATE_INFO.
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers.  Each *_CHECK macro asserts (when
   tree checking is enabled) that NODE has one of the listed codes and
   evaluates to NODE. */

/* NODE must be a VAR_DECL or FUNCTION_DECL. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
/* NODE must be a TYPE_DECL, TEMPLATE_DECL or FUNCTION_DECL. */
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
/* Predicate form of the above: nonzero iff NODE has one of those codes. */
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
/* NODE must be a VAR_DECL, FUNCTION_DECL or PARM_DECL. */
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked form: NODE must be a FUNCTION_DECL whose lang_specific data
   marks it as a thunk; otherwise tree_check_failed aborts with location
   info.  Evaluates NODE exactly once. */
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
/* Unchecked build: the check is a no-op pass-through. */
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier.  Extends the C family's
   identifier node with the C++ binding stacks and cached values that the
   IDENTIFIER_* accessor macros below read. */
struct GTY(()) lang_identifier {
struct c_common_identifier c_common; /* base: shared C-family identifier data */
cxx_binding *namespace_bindings; /* IDENTIFIER_NAMESPACE_BINDINGS */
cxx_binding *bindings; /* IDENTIFIER_BINDING: innermost binding stack */
tree class_template_info; /* IDENTIFIER_TEMPLATE */
tree label_value; /* IDENTIFIER_LABEL_VALUE */
};
/* Return T as a typed lang_identifier* when it designates a C++
   front-end identifier (i.e. an IDENTIFIER_NODE); otherwise NULL. */
inline lang_identifier*
identifier_p (tree t)
{
  return TREE_CODE (t) == IDENTIFIER_NODE ? (lang_identifier *) t : NULL;
}
/* In an IDENTIFIER_NODE, nonzero if this identifier is actually a
keyword. C_RID_CODE (node) is then the RID_* value of the keyword,
and C_RID_YYCODE is the token number wanted by Yacc. */
#define C_IS_RESERVED_WORD(ID) TREE_LANG_FLAG_5 (ID)
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
/* Payload of a TEMPLATE_PARM_INDEX node (cf. TEMPLATE_PARM_PARAMETER_PACK
   in the flag table above).  NOTE(review): field meanings inferred from
   names -- index/level within the template parameter lists, the level
   before any substitution, and the associated parameter decl; confirm
   against the TEMPLATE_PARM_* accessors defined elsewhere. */
struct GTY(()) template_parm_index_s {
struct tree_common common;
int index;
int level;
int orig_level;
tree decl;
};
typedef struct template_parm_index_s template_parm_index;
/* Payload of a PTRMEM_CST node: records the member the
   pointer-to-member constant refers to. */
struct GTY(()) ptrmem_cst {
struct tree_common common;
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define IDENTIFIER_GLOBAL_VALUE(NODE) \
namespace_binding ((NODE), global_namespace)
#define SET_IDENTIFIER_GLOBAL_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), global_namespace, (VAL))
#define IDENTIFIER_NAMESPACE_VALUE(NODE) \
namespace_binding ((NODE), current_namespace)
#define SET_IDENTIFIER_NAMESPACE_VALUE(NODE, VAL) \
set_namespace_binding ((NODE), current_namespace, (VAL))
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* The overloaded FUNCTION_DECL. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) TREE_CHAIN (NODE)
/* Polymorphic access to FUNCTION and CHAIN. */
#define OVL_CURRENT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? OVL_FUNCTION (NODE) : (NODE))
#define OVL_NEXT(NODE) \
((TREE_CODE (NODE) == OVERLOAD) ? TREE_CHAIN (NODE) : NULL_TREE)
/* If set, this was imported in a using declaration.
This is not to confuse with being used somewhere, which
is not important for this node. */
#define OVL_USED(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* If set, this OVERLOAD was created for argument-dependent lookup
and can be freed afterward. */
#define OVL_ARG_DEPENDENT(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
/* Payload of an OVERLOAD node.  `function' is one FUNCTION_DECL in the
   overload set (OVL_FUNCTION); the chain to the next overload lives in
   TREE_CHAIN (OVL_CHAIN). */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
/* Payload of a BASELINK node; fields are read through the BASELINK_*
   accessor macros above. */
struct GTY(()) tree_baselink {
struct tree_common common;
tree binfo; /* BASELINK_BINFO: base where lookup found the functions */
tree functions; /* BASELINK_FUNCTIONS */
tree access_binfo; /* BASELINK_ACCESS_BINFO: base where the search began */
};
/* The different kinds of ids that we encounter. */
typedef enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
} cp_id_kind;
/* The various kinds of C++0x warnings we encounter. */
typedef enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
} cpp0x_warn_str;
/* The various kinds of operation used by composite_pointer_type. */
typedef enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
} composite_pointer_operation;
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
typedef enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
} expr_list_kind;
/* Possible cases of implicit bad rhs conversions. */
typedef enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
} impl_conv_rhs;
/* Possible cases of implicit or explicit bad conversions to void. */
typedef enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
} impl_conv_void;
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
typedef enum abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
} abstract_class_use;
/* Macros for access to language-specific slots in an identifier. */
#define IDENTIFIER_NAMESPACE_BINDINGS(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->namespace_bindings)
#define IDENTIFIER_TEMPLATE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->class_template_info)
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. It's PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
#define IDENTIFIER_LABEL_VALUE(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->label_value)
#define SET_IDENTIFIER_LABEL_VALUE(NODE, VALUE) \
IDENTIFIER_LABEL_VALUE (NODE) = (VALUE)
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* Nonzero if this identifier is the prefix for a mangled C++ operator
name. */
#define IDENTIFIER_OPNAME_P(NODE) TREE_LANG_FLAG_2 (NODE)
/* Nonzero if this identifier is the name of a type-conversion
operator. */
#define IDENTIFIER_TYPENAME_P(NODE) \
TREE_LANG_FLAG_4 (NODE)
/* Nonzero if this identifier is the name of a constructor or
destructor. */
#define IDENTIFIER_CTOR_OR_DTOR_P(NODE) \
TREE_LANG_FLAG_3 (NODE)
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (NAME))
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
/* Payload of a DEFAULT_ARG node: the cached tokens of a deferred-parse
   default argument (DEFARG_TOKENS) and the instantiations that still
   need it (DEFARG_INSTANTIATIONS). */
struct GTY (()) tree_default_arg {
struct tree_common common;
struct cp_token_cache *tokens;
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT \
|| is_overloaded_fn (TREE_PURPOSE (NODE))))
/* Payload of a DEFERRED_NOEXCEPT node: the noexcept-specification
   pattern and the template arguments to instantiate it with
   (see DEFERRED_NOEXCEPT_PATTERN / DEFERRED_NOEXCEPT_ARGS above). */
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
/* Payload of a STATIC_ASSERT node (see the STATIC_ASSERT_* accessors
   above for the semantics of each field). */
struct GTY (()) tree_static_assert {
struct tree_common common;
tree condition; /* integral constant expression being asserted */
tree message; /* string constant emitted when the condition is false */
location_t location; /* source location of the assertion */
};
/* Payload of an ARGUMENT_PACK_SELECT node.  NOTE(review): inferred from
   field names -- selects element `index' of `argument_pack'; confirm
   against the ARGUMENT_PACK_SELECT accessors defined elsewhere. */
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter.  The active kind for
   a given TRAIT_EXPR node is stored in TRAIT_EXPR_KIND (below). */
typedef enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_CONVERTIBLE_TO,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE
} cp_trait_kind;
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
/* Payload of a TRAIT_EXPR node (see the TRAIT_EXPR_* accessors above). */
struct GTY (()) tree_trait_expr {
struct tree_common common;
tree type1; /* TRAIT_EXPR_TYPE1: first type being processed */
tree type2; /* TRAIT_EXPR_TYPE2: second type being processed */
enum cp_trait_kind kind; /* TRAIT_EXPR_KIND: which trait this is */
};
/* Based off of TYPE_ANONYMOUS_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_OVERLOADED_OPERATOR_P (FNDECL) == CALL_EXPR \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
/* A lambda's default capture mode -- none, by copy, or by reference --
   as stored in LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (below). */
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* The return type in the expression.
* NULL_TREE indicates that none was specified. */
#define LAMBDA_EXPR_RETURN_TYPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->return_type)
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda. Note that the TREE_TYPE of a
LAMBDA_EXPR is always NULL_TREE, because we need to instantiate the
LAMBDA_EXPR in order to instantiate the type. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->closure)
/* Payload of a LAMBDA_EXPR node; each field is accessed through the
   corresponding LAMBDA_EXPR_* macro defined above. */
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
tree capture_list; /* LAMBDA_EXPR_CAPTURE_LIST: FIELD_DECLs per capture */
tree this_capture; /* LAMBDA_EXPR_THIS_CAPTURE */
tree return_type; /* LAMBDA_EXPR_RETURN_TYPE; NULL_TREE if unspecified */
tree extra_scope; /* LAMBDA_EXPR_EXTRA_SCOPE: mangling scope or NULL_TREE */
tree closure; /* LAMBDA_EXPR_CLOSURE: the closure class type */
vec<tree, va_gc> *pending_proxies; /* LAMBDA_EXPR_PENDING_PROXIES */
location_t locus; /* LAMBDA_EXPR_LOCATION */
enum cp_lambda_default_capture_mode_type default_capture_mode;
int discriminator; /* LAMBDA_EXPR_DISCRIMINATOR: number within extra_scope */
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
/* One (typedef, context, location) triplet as described above. */
struct GTY(()) qualified_typedef_usage_s {
tree typedef_decl; /* the typedef that was used */
tree context; /* the scope it was used through */
location_t locus; /* where in the source it was used */
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Payload of a TEMPLATE_INFO node: the typedefs whose access must be
   rechecked when the specialization is instantiated outside argument
   deduction (cf. TINFO_HAS_ACCESS_ERRORS above). */
struct GTY(()) tree_template_info {
struct tree_common common;
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
/* Discriminator values for the lang_tree_node union below: which
   language-specific structure a given tree node carries.  Each TS_CP_*
   value matches a GTY tag on a union member. */
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_BINDING,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_WRAPPER,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_USERDEF_LITERAL,
LAST_TS_CP_ENUM
};
/* The resulting tree type: a discriminated union of all the
   language-specific node payloads.  cp_tree_node_structure (referenced
   in the GTY desc) selects the active member via the TS_CP_* tags. */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index_s GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Indices into the cp_global_trees array (declared just below); the
   #define accessors that follow give each slot its conventional name. */
enum cp_tree_index
{
CPTI_JAVA_BYTE_TYPE,
CPTI_JAVA_SHORT_TYPE,
CPTI_JAVA_INT_TYPE,
CPTI_JAVA_LONG_TYPE,
CPTI_JAVA_FLOAT_TYPE,
CPTI_JAVA_DOUBLE_TYPE,
CPTI_JAVA_CHAR_TYPE,
CPTI_JAVA_BOOLEAN_TYPE,
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_GLOBAL_DELETE_FNDECL,
CPTI_AGGR_TAG,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_NELTS_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_LANG_NAME_JAVA,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_JCLASS,
CPTI_TERMINATE,
CPTI_CALL_UNEXPECTED,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_KEYED_CLASSES,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define java_byte_type_node cp_global_trees[CPTI_JAVA_BYTE_TYPE]
#define java_short_type_node cp_global_trees[CPTI_JAVA_SHORT_TYPE]
#define java_int_type_node cp_global_trees[CPTI_JAVA_INT_TYPE]
#define java_long_type_node cp_global_trees[CPTI_JAVA_LONG_TYPE]
#define java_float_type_node cp_global_trees[CPTI_JAVA_FLOAT_TYPE]
#define java_double_type_node cp_global_trees[CPTI_JAVA_DOUBLE_TYPE]
#define java_char_type_node cp_global_trees[CPTI_JAVA_CHAR_TYPE]
#define java_boolean_type_node cp_global_trees[CPTI_JAVA_BOOLEAN_TYPE]
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define global_delete_fndecl cp_global_trees[CPTI_GLOBAL_DELETE_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* We cache these tree nodes so as to call get_identifier less
frequently. */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
/* Further identifiers cached in cp_global_trees; their semantics are
established at their points of use elsewhere in the front end. */
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define nelts_identifier cp_global_trees[CPTI_NELTS_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the std namespace. */
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
/* Identifiers naming the C, C++ and Java languages (cf. enum
languages below). */
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
#define lang_name_java cp_global_trees[CPTI_LANG_NAME_JAVA]
/* Exception specifier used for throw(). */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
/* Exception specifiers used for noexcept(true) and noexcept(false). */
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
/* If non-NULL, a POINTER_TYPE equivalent to (java::lang::Class*). */
#define jclass_node cp_global_trees[CPTI_JCLASS]
/* The declaration for `std::terminate'. */
#define terminate_node cp_global_trees[CPTI_TERMINATE]
/* The declaration for "__cxa_call_unexpected". */
#define call_unexpected_node cp_global_trees[CPTI_CALL_UNEXPECTED]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A TREE_LIST of the dynamic classes whose vtables may have to be
emitted in this translation unit. */
#define keyed_classes cp_global_trees[CPTI_KEYED_CLASSES]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
/* Global state. */
struct GTY(()) saved_scope {
/* Bindings saved on entry to this scope. */
vec<cxx_saved_binding, va_gc> *old_bindings;
/* The current open namespace (accessed via current_namespace). */
tree old_namespace;
/* Stack of namespaces of current declarations (decl_namespace_list). */
vec<tree, va_gc> *decl_ns_list;
/* Name and type of the current class (current_class_name,
current_class_type). */
tree class_name;
tree class_type;
/* The access specifier most recently given while parsing a class
(current_access_specifier). */
tree access_specifier;
tree function_decl;
/* The language name stack and its top entry (current_lang_base,
current_lang_name). */
vec<tree, va_gc> *lang_base;
tree lang_name;
/* Active template parameters (current_template_parms). */
tree template_parms;
/* Cached binding level of the most recently exited class
(previous_class_level). */
cp_binding_level *x_previous_class_level;
tree x_saved_tree;
/* Only used for uses of this in trailing return type. */
tree x_current_class_ptr;
tree x_current_class_ref;
int x_processing_template_decl;
int x_processing_specialization;
BOOL_BITFIELD x_processing_explicit_instantiation : 1;
BOOL_BITFIELD need_pop_function_context : 1;
int unevaluated_operand;
int inhibit_evaluation_warnings;
/* If non-zero, implicit "omp declare target" attribute is added into the
attribute lists. */
int omp_declare_target_attribute;
struct stmt_tree_s x_stmt_tree;
cp_binding_level *class_bindings;
cp_binding_level *bindings;
/* Map from template-local variable decls to their instantiations
(local_specializations). */
struct pointer_map_t *x_local_specializations;
/* The enclosing saved scope, forming a stack. */
struct saved_scope *prev;
};
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
/* The language name on top of that stack. */
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
/* Template-processing state kept in the current saved scope. */
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
/* RAII sentinel to disable certain warnings during template substitution
and elsewhere. */
struct warning_sentinel
{
/* Reference to the warning flag being controlled. */
int &flag;
/* The flag's original value, restored by the destructor. */
int val;
/* Save FLAG's current value and, if SUPPRESS, zero the flag for the
lifetime of the sentinel. */
warning_sentinel(int& flag, bool suppress=true)
: flag(flag), val(flag) { if (suppress) flag = 0; }
/* Restore the saved value. */
~warning_sentinel() { flag = val; }
};
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
presently being instantiated to the corresponding instantiated
local variables. */
#define local_specializations scope_chain->x_local_specializations
/* A list of private types mentioned, for deferred access checking. */
/* NOTE(review): the comment above does not match the declaration that
follows; it may belong to a declaration elided from this excerpt. */
/* The stack of saved scopes; the head describes the scope the front
end is currently in (see the accessor macros above). */
extern GTY(()) struct saved_scope *scope_chain;
/* Pairs an integer UID with a tree node; used as the entry type of
hash tables such as extern_decl_map in struct language_function. */
struct GTY(()) cxx_int_tree_map {
unsigned int uid;
tree to;
};
/* Hash and equality callbacks for tables of cxx_int_tree_map. */
extern unsigned int cxx_int_tree_map_hash (const void *);
extern int cxx_int_tree_map_eq (const void *, const void *);
/* Global state pertinent to the current function. */
struct GTY(()) language_function {
struct c_language_function base;
/* Label marking the end of cdtor work (cdtor_label). */
tree x_cdtor_label;
/* The `this' PARM_DECL and an expression for `*this'
(current_class_ptr, current_class_ref). */
tree x_current_class_ptr;
tree x_current_class_ref;
/* EH_SPEC_BLOCK for this function's exception specifiers
(current_eh_spec_block). */
tree x_eh_spec_block;
/* The `__in_chrg' and `__vtt_parm' parameters
(current_in_charge_parm, current_vtt_parm). */
tree x_in_charge_parm;
tree x_vtt_parm;
/* Expression always returned, for the named return value
optimization (current_function_return_value). */
tree x_return_value;
/* `auto'-containing type used for return type deduction
(current_function_auto_return_pattern). */
tree x_auto_return_pattern;
/* Flags mirrored by the current_function_* / in_* macros below. */
BOOL_BITFIELD returns_value : 1;
BOOL_BITFIELD returns_null : 1;
BOOL_BITFIELD returns_abnormally : 1;
BOOL_BITFIELD infinite_loop: 1;
BOOL_BITFIELD x_in_function_try_handler : 1;
BOOL_BITFIELD x_in_base_initializer : 1;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
htab_t GTY((param_is(struct named_label_entry))) x_named_labels;
cp_binding_level *bindings;
vec<tree, va_gc> *x_local_names;
/* Tracking possibly infinite loops. This is a vec<tree> only because
vec<bool> doesn't work with gtype. */
vec<tree, va_gc> *infinite_loops;
/* Hash table of cxx_int_tree_map entries.  NOTE(review): appears to
map decl UIDs to extern decls -- confirm at its points of use. */
htab_t GTY((param_is (struct cxx_int_tree_map))) extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor destructor, the point at which all derived class
destroying/construction has been done. I.e., just before a
constructor returns, or before any base class destroying will be done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. Outside a function these fall back to the
copies kept in scope_chain (used for trailing return types). */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
obvious infinite loop. This can have false positives and false
negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
/* Nonzero while processing the handler of a function-try-block. */
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* True if NAME is the IDENTIFIER_NODE for an overloaded "operator
new" or "operator delete". */
#define NEW_DELETE_OPNAME_P(NAME) \
((NAME) == ansi_opname (NEW_EXPR) \
|| (NAME) == ansi_opname (VEC_NEW_EXPR) \
|| (NAME) == ansi_opname (DELETE_EXPR) \
|| (NAME) == ansi_opname (VEC_DELETE_EXPR))
/* The IDENTIFIER_NODE for the overloadable operator with tree code
CODE, and likewise for its assignment variant. */
#define ansi_opname(CODE) \
(operator_name_info[(int) (CODE)].identifier)
#define ansi_assopname(CODE) \
(assignment_operator_name_info[(int) (CODE)].identifier)
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
/* The languages known to this front end (cf. the lang_name_* nodes
above). */
enum languages { lang_c, lang_cplusplus, lang_java };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_IDENTIFIER(NODE) (DECL_NAME (TYPE_NAME (NODE)))
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_ANONYMOUS_P(NODE) \
(OVERLOAD_TYPE_P (NODE) && ANON_AGGRNAME_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (T) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not a union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(CLASS_TYPE_P (T) && TREE_CODE (T) != UNION_TYPE)
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
/* Nonzero if T is a class or enumeration type. */
#define OVERLOAD_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this a "Java" type, defined in 'extern "Java"'. */
#define TYPE_FOR_JAVA(NODE) TYPE_LANG_FLAG_3 (NODE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
/* All but the first entry of the argument-type list of function NODE. */
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A (purpose, value) pair of trees, usable in GC-managed vectors
(e.g. the vcall_indices vector below). */
typedef struct GTY (()) tree_pair_s {
tree purpose;
tree value;
} tree_pair_s;
typedef tree_pair_s *tree_pair_p;
/* These are a few header flags for 'struct lang_type'. Actually,
all but the first are used only for lang_type_class; they
are put in this structure to save space. */
struct GTY(()) lang_type_header {
/* Discriminator for union lang_type_u: 1 selects lang_type_class,
0 selects lang_type_ptrmem (see the GTY desc in struct lang_type). */
BOOL_BITFIELD is_lang_type_class : 1;
/* See TYPE_HAS_CONVERSION. */
BOOL_BITFIELD has_type_conversion : 1;
/* See TYPE_HAS_COPY_CTOR. */
BOOL_BITFIELD has_copy_ctor : 1;
/* See TYPE_HAS_DEFAULT_CONSTRUCTOR. */
BOOL_BITFIELD has_default_ctor : 1;
/* See CLASSTYPE_READONLY_FIELDS_NEED_INIT. */
BOOL_BITFIELD const_needs_init : 1;
/* See CLASSTYPE_REF_FIELDS_NEED_INIT. */
BOOL_BITFIELD ref_needs_init : 1;
/* See TYPE_HAS_CONST_COPY_ASSIGN. */
BOOL_BITFIELD has_const_copy_assign : 1;
/* Unused padding bit. */
BOOL_BITFIELD spare : 1;
};
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameters types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
struct GTY(()) lang_type_class {
/* Flags shared with the pointer-to-member variant. */
struct lang_type_header h;
unsigned char align;
/* Most of the single-bit flags and pointer fields below are
documented at their accessor macros (the TYPE_* / CLASSTYPE_*
macros later in this file). */
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_copy_assign : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned java_interface : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned use_template : 2;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_copy_assign : 1;
unsigned lazy_destructor : 1;
unsigned has_const_copy_ctor : 1;
unsigned has_complex_copy_ctor : 1;
unsigned has_complex_copy_assign : 1;
unsigned non_aggregate : 1;
unsigned has_complex_dflt : 1;
unsigned has_list_ctor : 1;
unsigned non_std_layout : 1;
unsigned is_literal : 1;
unsigned lazy_move_ctor : 1;
unsigned lazy_move_assign : 1;
unsigned has_complex_move_ctor : 1;
unsigned has_complex_move_assign : 1;
unsigned has_constexpr_ctor : 1;
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
unsigned dummy : 3;
tree primary_base;
vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
vec<tree, va_gc> * GTY((reorder ("resort_type_method_vec"))) methods;
tree key_method;
tree decl_list;
tree template_info;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
/* sorted_fields is sorted based on a pointer, so we need to be able
to resort it if pointers get rearranged. */
struct sorted_fields_type * GTY ((reorder ("resort_sorted_fields")))
sorted_fields;
/* FIXME reuse another field? */
tree lambda_expr;
};
/* The lang_type variant for pointer-to-member types: just the shared
header plus the associated record. */
struct GTY(()) lang_type_ptrmem {
struct lang_type_header h;
tree record;
};
/* Language-specific data attached to a type. The header's
is_lang_type_class bit discriminates which union member is active,
as encoded by the GTY desc/tag annotations below. */
struct GTY((variable_size)) lang_type {
union lang_type_u
{
struct lang_type_header GTY((skip (""))) h;
struct lang_type_class GTY((tag ("1"))) c;
struct lang_type_ptrmem GTY((tag ("0"))) ptrmem;
} GTY((desc ("%h.h.is_lang_type_class"))) u;
};
/* Accessors for the class-specific and pointer-to-member-specific
   variants of struct lang_type.  With tree checking enabled, a GNU
   statement expression verifies via the header's is_lang_type_class
   discriminator that the node really holds the requested variant,
   calling lang_check_failed on a mismatch; otherwise the union member
   is accessed directly.

   NOTE(review): the checked versions previously ended with `<->u.c;'
   and `<->u.ptrmem;' -- an HTML-entity mangling of `&lt->...' -- which
   cannot compile.  Restored the `&lt->u.c' / `&lt->u.ptrmem' result
   expressions to match the unchecked variants below.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_TYPE_CLASS_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (! lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.c; })
#define LANG_TYPE_PTRMEM_CHECK(NODE) __extension__ \
({ struct lang_type *lt = TYPE_LANG_SPECIFIC (NODE); \
if (lt->u.h.is_lang_type_class) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ptrmem; })
#else
#define LANG_TYPE_CLASS_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.c)
#define LANG_TYPE_PTRMEM_CHECK(NODE) (&TYPE_LANG_SPECIFIC (NODE)->u.ptrmem)
#endif /* ENABLE_TREE_CHECKING */
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->h.has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector member functions defined in this class. Each element is
either a FUNCTION_DECL, a TEMPLATE_DECL, or an OVERLOAD. All
functions with the same name end up in the same slot. The first
two elements are for constructors, and destructors, respectively.
All template conversion operators to innermost template dependent
types are overloaded on the next slot, if they exist. Note, the
names for these functions will not all be the same. The
non-template conversion operators & templated conversions to
non-innermost template types are next, followed by ordinary member
functions. There may be empty entries at the end of the vector.
The conversion operators are unsorted. The ordinary member
functions are sorted, once the class is complete. */
#define CLASSTYPE_METHOD_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->methods)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* The slot in the CLASSTYPE_METHOD_VEC where constructors go. */
#define CLASSTYPE_CONSTRUCTOR_SLOT 0
/* The slot in the CLASSTYPE_METHOD_VEC where destructors go. */
#define CLASSTYPE_DESTRUCTOR_SLOT 1
/* The first slot in the CLASSTYPE_METHOD_VEC where conversion
operators can appear. */
#define CLASSTYPE_FIRST_CONVERSION_SLOT 2
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
((*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_CONSTRUCTOR_SLOT])
/* A FUNCTION_DECL for the destructor for NODE. These are the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTORS(NODE) \
(CLASSTYPE_METHOD_VEC (NODE) \
? (*CLASSTYPE_METHOD_VEC (NODE))[CLASSTYPE_DESTRUCTOR_SLOT] \
: NULL_TREE)
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* True if this a Java interface type, declared with
'__attribute__ ((java_interface))'. */
#define TYPE_JAVA_INTERFACE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->java_interface)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->h.ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
/* Setters for the interface_unknown flag. */
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy). */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_0 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
/* Discriminates which lang_decl_u union member is in use; see the
desc ("%h.base.selector") annotation on struct lang_decl. */
unsigned selector : 16; /* Larger than necessary for faster access. */
ENUM_BITFIELD(languages) language : 4;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
unsigned friend_attr : 1; /* fn, type or template */
unsigned template_conv_p : 1; /* var or template */
unsigned odr_used : 1; /* var or fn */
/* Discriminates the lang_decl_u2 union (0 = access, 1 = discriminator). */
unsigned u2sel : 1;
/* 1 spare bit */
};
/* True for DECL codes which have template info and access, i.e. those
whose DECL_LANG_SPECIFIC uses (at least) the lang_decl_min form. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
/* Which member is live is recorded in base.u2sel (see the desc
annotation below). */
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the value of
DECL_OVERLOADED_OPERATOR_P. */
ENUM_BITFIELD (tree_code) operator_code : 16;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned constructor_attr : 1;
unsigned destructor_attr : 1;
unsigned assignment_operator_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
/* Also discriminates the lang_decl_u3 union below. */
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
/* Also discriminates the lang_decl_u5 union below. */
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
unsigned omp_declare_reduction_p : 1;
/* No spare bits on 32-bit hosts, 32 on 64-bit hosts. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces (NAMESPACE_DECL). */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
/* The binding level of the namespace; see NAMESPACE_LEVEL. */
cp_binding_level *level;
};
/* DECL_LANG_SPECIFIC for parameters (PARM_DECL). */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
/* Nesting depth of the declarator; see DECL_PARM_LEVEL. */
int level;
/* 1-based position among user-declared parameters; see DECL_PARM_INDEX. */
int index;
};
/* DECL_LANG_SPECIFIC for all decl codes. It would be nice to just make
this a union rather than a struct containing a union as its only field,
but tree.h declares it as a struct. The active member is chosen by
base.selector via the desc annotation below. */
struct GTY((variable_size)) lang_decl {
union GTY((desc ("%h.base.selector"))) lang_decl_u {
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("0"))) min;
struct lang_decl_fn GTY ((tag ("1"))) fn;
struct lang_decl_ns GTY((tag ("2"))) ns;
struct lang_decl_parm GTY((tag ("3"))) parm;
} u;
};
/* Looks through a template (if present) to find what it declares:
for a TEMPLATE_DECL, its DECL_TEMPLATE_RESULT; otherwise NODE itself. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
/* Checked accessors for the DECL_LANG_SPECIFIC union members. Each
statement expression fetches NODE's lang_decl, aborts via
lang_check_failed if the decl does not carry the requested form, and
yields a pointer to the selected union member. (The `&lt->u.X' result
lines had been mangled to `<->u.X' by an HTML-entity conversion of
`&lt'; restored here.) */
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE)) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
template, not just on a FUNCTION_DECL. So when looking for things in
lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != 1) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != 2) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != PARM_DECL) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.parm; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
<->u.min.u2; })
#else
/* Unchecked variants: same accessors without the consistency checks. */
#define LANG_DECL_MIN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
DECL_CXX_CONSTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
DECL_CXX_DESTRUCTOR_P (STRIP_TEMPLATE (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized complete destructor, deleting destructor, or the base
destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_DESTRUCTOR_P (NODE) \
&& !DECL_CLONED_FUNCTION_P (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_DESTRUCTOR_P (NODE) \
&& DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
Clones immediately follow FN on its DECL_CHAIN. */
#define FOR_EACH_CLONE(CLONE, FN) \
if (TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))) \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) \
(DECL_NAME (NODE) && IDENTIFIER_TYPENAME_P (DECL_NAME (NODE)))
/* If FN is a conversion operator, the type to which it converts.
Otherwise, NULL_TREE. */
#define DECL_CONV_FN_TYPE(FN) \
(DECL_CONV_FN_P (FN) ? TREE_TYPE (DECL_NAME (FN)) : NULL_TREE)
/* Nonzero if NODE, which is a TEMPLATE_DECL, is a template
conversion operator to a type dependent on the innermost template
args. */
#define DECL_TEMPLATE_CONV_FN_P(NODE) \
(DECL_LANG_SPECIFIC (TEMPLATE_DECL_CHECK (NODE))->u.base.template_conv_p)
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. Reuses the template_conv_p bit, which for a
VAR_DECL is otherwise unused (see lang_decl_base). */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.template_conv_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.template_conv_p = true)
/* Set the overloaded operator code for NODE to CODE. */
#define SET_OVERLOADED_OPERATOR_CODE(NODE, CODE) \
(LANG_DECL_FN_CHECK (NODE)->operator_code = (CODE))
/* If NODE is an overloaded operator, then this returns the TREE_CODE
associated with the overloaded operator.
DECL_ASSIGNMENT_OPERATOR_P must also be checked to determine
whether or not NODE is an assignment operator. If NODE is not an
overloaded operator, ERROR_MARK is returned. Since the numerical
value of ERROR_MARK is zero, this macro can be used as a predicate
to test whether or not NODE is an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
(IDENTIFIER_OPNAME_P (DECL_NAME (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->operator_code : ERROR_MARK)
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->assignment_operator_p)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears (or will appear)
as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_attr)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier. */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the base it inherits from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_CONSTRUCTOR_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR_BASE(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& !strcmp (IDENTIFIER_POINTER (DECL_NAME (NODE)), "__PRETTY_FUNCTION__"))
/* Nonzero if the thread-local variable was declared with __thread
as opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f (); };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
/* The context of NODE, mapping file scope to the global namespace. */
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* For a NAMESPACE_DECL: the list of using namespace directives.
The PURPOSE is the used namespace, the value is the namespace
that is the common ancestor. */
#define DECL_NAMESPACE_USING(NODE) DECL_VINDEX (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the DECL_INITIAL is used to record all users
of a namespace, to record the transitive closure of using namespace. */
#define DECL_NAMESPACE_USERS(NODE) DECL_INITIAL (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, the list of namespaces which have associated
themselves with this one. */
#define DECL_NAMESPACE_ASSOCIATIONS(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->decl_non_common.saved_tree)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
/* The namespace NODE refers to, looking through namespace aliases. */
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST concatenating using directives, indicates an indirect
directive. */
#define TREE_INDIRECT_USING(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* Lookup/insert into the shadowed-variable table used by
DECL_SHADOWED_FOR_VAR below. */
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Non zero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Non zero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
whose declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* For a class type: if this structure has many fields, we'll sort them
and put them into a TREE_VEC. */
#define CLASSTYPE_SORTED_FIELDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->sorted_fields)
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (RECORD_OR_UNION_CHECK (NODE))->template_info)
/* Template information for an ENUMERAL_TYPE. Although an enumeration may
not be a primary template, it may be declared within the scope of a
primary template and the enumeration constants may depend on
non-type template parameters. */
#define ENUM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (ENUMERAL_TYPE_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(LANG_TYPE_CLASS_CHECK (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)) \
->template_info)
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
   BOUND_TEMPLATE_TEMPLATE_PARM type.  Note that if NODE is a
   specialization of an alias template, this accessor returns the
   template info for the alias template, not the one (if any) for the
   template of the underlying type.  */
#define TYPE_TEMPLATE_INFO(NODE) \
  ((TYPE_ALIAS_P (NODE) && DECL_LANG_SPECIFIC (TYPE_NAME (NODE))) \
   /* The guard above already verified DECL_LANG_SPECIFIC, so the \
      alias arm can read the template info directly; the former inner \
      re-check of DECL_LANG_SPECIFIC was unreachable dead code.  */ \
   ? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
   : ((TREE_CODE (NODE) == ENUMERAL_TYPE) \
      ? ENUM_TEMPLATE_INFO (NODE) \
      : ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
	 ? TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO (NODE) \
	 : (CLASS_TYPE_P (NODE) \
	    ? CLASSTYPE_TEMPLATE_INFO (NODE) \
	    : NULL_TREE))))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
? (ENUM_TEMPLATE_INFO (NODE) = (VAL)) \
: ((CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (CLASSTYPE_TEMPLATE_INFO (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL))))
/* Accessors for a TEMPLATE_INFO node: the TEMPLATE_DECL that NODE is a
   specialization or instantiation of, and the template arguments used
   (see the commentary above DECL_TEMPLATE_INFO).  */
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
/* NOTE(review): looks like this flags template info on a pending list;
   confirm against the uses in pt.c.  No node check is applied here.  */
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
   this property contains the number of arguments that are not
   defaulted.  */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
   property.  Both expansions are fully parenthesized so the macros keep
   their meaning when embedded in a larger expression (an unparenthesized
   assignment or conditional expression is rebound by surrounding
   operators).  */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
  (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) = build_int_cst (NULL_TREE, INT_VALUE))
#ifdef ENABLE_CHECKING
/* With checking enabled the count must have been recorded, so read it
   unconditionally.  */
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
  (int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)))
#else
/* Without checking, fall back to the full innermost-args length when
   the count was never recorded.  */
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
  (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
   ? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
   : TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE)))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template arguments is actually a vector of vectors,
rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type.  Even if NODE
   will be generated from a partial specialization, the TEMPLATE_DECL
   referred to here will be the original template.  For example,
   given:
     template <typename T> struct S {};
     template <typename T> struct S<T*> {};
   the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>.  */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
/* The template argument vector for class type NODE; see DECL_TI_ARGS.  */
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASS_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
   EXPR_PACK_EXPANSION.  Wrapped in do { } while (0) so the macro acts
   as a single statement: the former bare if/else expansion would
   capture a following "else" (dangling-else) and left a stray empty
   statement after the caller's semicolon inside an unbraced if.  */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
  do { \
    if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
      TREE_TYPE (NODE) = VALUE; \
    else \
      TREE_OPERAND (NODE, 0) = VALUE; \
  } while (0)
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MINVAL (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAXVAL (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack.  VALUE must be a
   TREE_VEC.  Wrapped in do { } while (0) so the macro acts as a single
   statement and cannot mis-bind a following "else" (the former bare
   if/else expansion had a dangling-else hazard).  */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
  do { \
    if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
      TREE_TYPE (NODE) = VALUE; \
    else \
      TREE_OPERAND (NODE, 0) = VALUE; \
  } while (0)
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
/* In an ARGUMENT_PACK_SELECT, the actual underlying argument that the
   ARGUMENT_PACK_SELECT represents.  The expansion deliberately has no
   trailing semicolon: the stray ";" that used to end it made the macro
   unusable inside a larger expression and injected empty statements at
   every statement use.  */
#define ARGUMENT_PACK_SELECT_ARG(NODE) \
  TREE_VEC_ELT (ARGUMENT_PACK_ARGS (ARGUMENT_PACK_SELECT_FROM_PACK (NODE)), \
		ARGUMENT_PACK_SELECT_INDEX (NODE))
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if CALL_EXPR expresses list-initialization of an object. */
#define CALL_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE),CALL_EXPR,AGGR_INIT_EXPR))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF has been parenthesized, or an
INDIRECT_REF comes from parenthesizing a VAR_DECL. Currently only set
some of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK2 ((NODE), COMPONENT_REF, INDIRECT_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs.  */
/* Iteration state for walking the argument list of an AGGR_INIT_EXPR.
   Initialize with init_aggr_init_expr_arg_iterator, advance with
   next_aggr_init_expr_arg, or use FOR_EACH_AGGR_INIT_EXPR_ARG.  */
typedef struct aggr_init_expr_arg_iterator_d {
  tree t;	/* The AGGR_INIT_EXPR being iterated over.  */
  int n;	/* Total argument count (aggr_init_expr_nargs).  */
  int i;	/* Index of the next argument to return.  */
} aggr_init_expr_arg_iterator;
/* Prepare ITER to walk the argument list of AGGR_INIT_EXPR node EXP,
   positioned before the first argument.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
				  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Fetch the argument at ITER's current position and step the iterator
   forward.  Yields NULL_TREE once every argument has been consumed.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  if (iter->i >= iter->n)
    return NULL_TREE;
  return AGGR_INIT_EXPR_ARG (iter->t, iter->i++);
}
/* Reset ITER onto AGGR_INIT_EXPR node EXP and hand back the first
   argument (NULL_TREE if there are none).  Useful in for expressions,
   e.g.
     for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
          arg = next_aggr_init_expr_arg (&iter))  */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  /* Seed the iterator state directly, then delegate to the common
     advance routine.  */
  iter->t = exp;
  iter->n = aggr_init_expr_nargs (exp);
  iter->i = 0;
  return next_aggr_init_expr_arg (iter);
}
/* Report whether iterator ITER still has arguments left to deliver,
   leaving the iterator state untouched.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->i < iter->n;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration name. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type.  The
   underlying type is determined in different ways, depending on the
   properties of the enum:
   - In C++0x, the underlying type can be explicitly specified, e.g.,
       enum E1 : char { ... } // underlying type is char
   - In a C++0x scoped enumeration, the underlying type is int
     unless otherwise specified:
       enum class E2 { ... } // underlying type is int
   - Otherwise, the underlying type is determined based on the
     values of the enumerators.  In this case, the
     ENUM_UNDERLYING_TYPE will not be set until after the definition
     of the enumeration is completed by finish_enum.  */
#define ENUM_UNDERLYING_TYPE(TYPE) \
  TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
||TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type can not be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is a non-trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
   destructor.  Therefore, destroying an object of this type will
   involve a call to a destructor.  This can apply to objects of
   ARRAY_TYPE if the type of the elements needs a destructor.  */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
  (TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Nonzero for _TYPE node means that this type is a pointer to member
function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_LANG_SPECIFIC (NODE) \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ptrmemfunc_flag)
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* These are used to manipulate the canonical RECORD_TYPE from the
hashed POINTER_TYPE, and can only be used on the POINTER_TYPE. */
#define TYPE_GET_PTRMEMFUNC_TYPE(NODE) \
(TYPE_LANG_SPECIFIC (NODE) ? LANG_TYPE_PTRMEM_CHECK (NODE)->record : NULL)
#define TYPE_SET_PTRMEMFUNC_TYPE(NODE, VALUE) \
do { \
if (TYPE_LANG_SPECIFIC (NODE) == NULL) \
{ \
TYPE_LANG_SPECIFIC (NODE) = ggc_alloc_cleared_lang_type \
(sizeof (struct lang_type_ptrmem)); \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.h.is_lang_type_class = 0; \
} \
TYPE_LANG_SPECIFIC (NODE)->u.ptrmem.record = (VALUE); \
} while (0)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) (((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
#define TYPE_WAS_ANONYMOUS(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL). */
#define DECL_TEMPLATE_PARMS(NODE) \
TEMPLATE_DECL_CHECK (NODE)->decl_non_common.arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates. */
#define DECL_TEMPLATE_RESULT(NODE) \
DECL_RESULT_FLD (TEMPLATE_DECL_CHECK (NODE))
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_VINDEX (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, in the scope of `S' itself, so that you can say `S::S'.
DECL_SELF_REFERENCE_P will hold for that second typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INFO (DECL) && !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) && ! DECL_NOT_REALLY_EXTERN (NODE))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* An enumeration of the kinds of tag (class-key, "enum", or
"typename" keyword) that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type /* "typename" types. */
};
/* The various kinds of lvalues we distinguish. Each enumerator is a
single bit so that the flags may be OR'd together into a
cp_lvalue_kind (see the typedef below). */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2, /* An xvalue (rvalue formed using an rvalue reference). */
clk_class = 4, /* A prvalue of class-type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
/* Various kinds of template specialization, instantiation, etc. The
tsk_invalid_* and tsk_*_parms enumerators name erroneous
declaration forms. */
typedef enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
} tmpl_spec_kind;
/* The various kinds of access. BINFO_ACCESS depends on these being
two bit quantities, so do not add enumerators that would not fit
in two bits. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
typedef enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
} access_kind;
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. sfk_none must stay
first so that it keeps the value zero (see below). */
typedef enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_inheriting_constructor /* An inheriting constructor. */
} special_function_kind;
/* The various kinds of linkage. From [basic.link]:
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
typedef enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
} linkage_kind;
/* The kinds of storage duration (cf. [basic.stc]). */
typedef enum duration_kind {
dk_static, /* Static storage duration. */
dk_thread, /* Thread storage duration. */
dk_auto, /* Automatic storage duration. */
dk_dynamic /* Dynamic storage duration. */
} duration_kind;
/* Bitmask flags to control type substitution. Each flag is a single
bit, so flags may be OR'd together (see tf_warning_or_error
below for a common combination). */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy.
These are single-bit flags (plus the ba_check combination) that
may be OR'd together into a base_access. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing.  */
typedef enum deferring_kind {
  dk_no_deferred = 0, /* Check access immediately */
  dk_deferred = 1,    /* Deferred check */
  dk_no_check = 2     /* No access check */
} deferring_kind;
/* The kind of base we can find, looking in a class hierarchy.
   Values <0 indicate we failed.  */
typedef enum base_kind {
  bk_inaccessible = -3, /* The base is inaccessible */
  bk_ambig = -2,        /* The base is ambiguous */
  bk_not_base = -1,     /* It is not a base */
  bk_same_type = 0,     /* It is the same type */
  bk_proper_base = 1,   /* It is a proper base */
  bk_via_virtual = 2    /* It is a proper base, but via a virtual
                           path.  This might not be the canonical
                           binfo.  */
} base_kind;
/* Node for "pointer to (virtual) function".
   This may be distinct from ptr_type_node so gdb can distinguish them.  */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'.  */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
   (Zero if we are at namespace scope, one inside the body of a
   function, two inside the body of a function in a local class, etc.)  */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
   PARM_DECLs in cp_tree_equal.  */
extern int comparing_specializations;
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants.  */
typedef int cp_cv_quals;
/* In parser.c.  */
/* Nonzero if we are parsing an unevaluated operand: an operand to
   sizeof, typeof, or alignof.  This is a count since operands to
   sizeof can be nested.  */
extern int cp_unevaluated_operand;
extern tree cp_convert_range_for (tree, tree, tree, bool);
extern bool parsing_nsdmi (void);
extern void inject_this_parameter (tree, cp_cv_quals);
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
   fn_type_unification.  Their meanings are described with the
   documentation for fn_type_unification.  */
typedef enum unification_kind_t {
  DEDUCE_CALL,   /* Deduction for a function call.  */
  DEDUCE_CONV,   /* Deduction for a conversion function.  */
  DEDUCE_EXACT   /* Deduction requiring an exact match.  */
} unification_kind_t;
/* in class.c */
/* Nesting depth of the class currently being processed.  */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
   declaration order.  */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* Here's where we control how name mangling takes place.  */
/* Cannot use '$' up front, because this confuses gdb
   (names beginning with '$' are gdb-local identifiers).
   Note that all forms in which the '$' is significant are long enough
   for direct indexing (meaning that if we know there is a '$'
   at a particular location, we can index into the string at
   any other location that provides distinguishing characters).  */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
   doesn't allow '.' in symbol names.  */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
/* Neither '.' nor '$' is usable: fall back to plain-identifier
   spellings and string-prefix predicates.  */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
             sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
             sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
             sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define THIS_NAME "this"
#define IN_CHARGE_NAME "__in_chrg"
#define VTBL_PTR_TYPE "__vtbl_ptr_type"
#define VTABLE_DELTA_NAME "__delta"
#define VTABLE_PFN_NAME "__pfn"
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
/* True iff ID_NODE names a user-defined literal operator: the
   identifier begins with `operator""'.  */
#define UDLIT_OPER_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), \
             UDLIT_OP_ANSI_PREFIX, \
             sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
/* The suffix of a user-defined literal operator name: the text
   following `operator""'.  */
#define UDLIT_OP_SUFFIX(ID_NODE) \
  (IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
/* Checks characters 1..3 directly (see the direct-indexing note
   above): matches names whose characters 1-3 are "vt" then JOINER.  */
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
  && IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
  && IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
  (!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.  */
extern int at_eof;
/* A list of namespace-scope objects which have constructors or
   destructors which reside in the global scope.  The decl is stored
   in the TREE_VALUE slot and the initializer is stored in the
   TREE_PURPOSE slot.  */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage.  */
extern GTY(()) tree tls_aggregates;
/* NOTE(review): presumably distinguishes an ordinary declaration from
   a destructor or typename declaration -- confirm at use sites.  */
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
   control their behavior.  Despite the LOOKUP_ prefix, many of these
   do not control name lookup.  ??? Functions using these flags should
   probably be modified to accept explicit boolean flags for the
   behaviors relevant to them.  */
/* Check for access violations.  */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
   should be called directly.  */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried.  This flag
   indicates that we are not performing direct-initialization.  */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
   as long as the current variable bindings; otherwise it only lives
   until the end of the complete-expression.  It also forces
   direct-initialization in cases where other parts of the compiler
   have already generated a temporary, such as reference
   initialization and the catch parameter.  */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
   conversions are not permitted (only built-in conversions).  */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor.  (Therefore, we do
   not need to check that the object is non-NULL before calling the
   destructor.)  */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries.  */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces.  */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types.  */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces.  */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
   (Normally, these entities are registered in the symbol table, but
   not found by lookup.)  */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* Prefer that the lvalue be treated as an rvalue.  */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed.  */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization.  */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor.  */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors.  */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
   Used by synthesized_method_walk to determine which functions will
   be called to initialize subobjects, in order to determine exception
   specification and possible implicit delete.
   This is kind of a hack, but exiting early avoids problems with trying
   to perform argument conversions when the class isn't complete yet.  */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
   cycles trying to declare them (core issue 1092).  */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
   digest_init.  */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments.  */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues.  */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions.  */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation.  */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
/* Predicates over the LOOKUP_PREFER_* bits.  */
#define LOOKUP_NAMESPACES_ONLY(F) \
  (((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
  (!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
   CONV_IMPLICIT   :  Perform implicit conversions (standard and user-defined).
   CONV_STATIC     :  Perform the explicit conversions for static_cast.
   CONV_CONST      :  Perform the explicit conversions for const_cast.
   CONV_REINTERPRET:  Perform the explicit conversions for reinterpret_cast.
   CONV_PRIVATE    :  Perform upcasts to private bases.
   CONV_FORCE_TEMP :  Require a new temporary when converting to the same
		      aggregate type.  */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
/* All conversions a C-style cast may perform except forcing a temporary.  */
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
  | CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
  | CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
/* Used by build_expr_type_conversion to indicate which types are
   acceptable as arguments to the expression under consideration.  */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with push_overloaded_decl. */
#define PUSH_GLOBAL 0 /* Push the DECL into namespace scope,
regardless of the current scope. */
#define PUSH_LOCAL 1 /* Push the DECL into the current
scope. */
#define PUSH_USING 2 /* We are pushing this DECL as the
result of a using declaration. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX.  */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
  ((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
/* Nonzero iff NODE is a template parameter pack.  */
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
  (TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
   TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes.  */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
  (TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
                                 TEMPLATE_TEMPLATE_PARM, \
                                 BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
  (TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
  (TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
  (TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
  (TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
  (TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto).  */
#define AUTO_IS_DECLTYPE(NODE) \
  (TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
   TFF_PLAIN_IDENTIFIER: unqualified part of a name.
   TFF_SCOPE: include the class and namespace scope of the name.
   TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
   TFF_DECL_SPECIFIERS: print decl-specifiers.
   TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
       a class-key (resp. `enum').
   TFF_RETURN_TYPE: include function return type.
   TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
   TFF_EXCEPTION_SPECIFICATION: show function exception specification.
   TFF_TEMPLATE_HEADER: show the template<...> header in a
       template-declaration.
   TFF_TEMPLATE_NAME: show only template-name.
   TFF_EXPR_IN_PARENS: parenthesize expressions.
   TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
   TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
       top-level entity.
   TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
       identical to their defaults.
   TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
       arguments for a function template specialization.  */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
   node.  */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
  ((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
   ? TYPE_TI_TEMPLATE (NODE) \
   : TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Per-operator naming data, indexed by tree code (see the tables
   below).  */
typedef struct GTY(()) operator_name_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The arity of the operator.  */
  int arity;
} operator_name_info_t;
/* A mapping from tree codes to operator name information.  */
extern GTY(()) operator_name_info_t operator_name_info
  [(int) MAX_TREE_CODES];
/* Similar, but for assignment operators.  */
extern GTY(()) operator_name_info_t assignment_operator_name_info
  [(int) MAX_TREE_CODES];
/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function.  */
enum virt_specifier
{
  VIRT_SPEC_UNSPECIFIED = 0x0,
  VIRT_SPEC_FINAL = 0x1,     /* The `final' virt-specifier.  */
  VIRT_SPEC_OVERRIDE = 0x2   /* The `override' virt-specifier.  */
};
/* A type-qualifier, or bitmask thereof, using the VIRT_SPEC
   constants.  */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the exception
   specification, are part of the function type.
   REF_QUAL_NONE    Ordinary member function with no ref-qualifier
   REF_QUAL_LVALUE  Member function with the &-ref-qualifier
   REF_QUAL_RVALUE  Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
  REF_QUAL_NONE = 0,
  REF_QUAL_LVALUE = 1,
  REF_QUAL_RVALUE = 2
};
/* A storage class.  */
typedef enum cp_storage_class {
  /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
     sets the storage_class field to sc_none.  */
  sc_none = 0,
  sc_auto,
  sc_register,
  sc_static,
  sc_extern,
  sc_mutable
} cp_storage_class;
/* An individual decl-specifier.  This is used to index the array of
   locations for the declspecs in struct cp_decl_specifier_seq
   below.  */
typedef enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,  /* ds_signed shares the first slot.  */
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_alias,
  ds_constexpr,
  ds_complex,
  ds_thread,
  ds_type_spec,
  ds_redefined_builtin_type_spec,
  ds_attribute,
  ds_std_attribute,
  ds_storage_class,
  ds_long_long,
  ds_last /* This enumerator must always be the last one.  */
} cp_decl_spec;
/* A decl-specifier-seq.  */
typedef struct cp_decl_specifier_seq {
  /* An array of locations for the declaration specifiers, indexed by
     enum cp_decl_spec_word.  */
  source_location locations[ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* The c++11 attributes that follow the type specifier.  */
  tree std_attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* True iff TYPE_SPEC defines a class or enum.  */
  BOOL_BITFIELD type_definition_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__int128" was explicitly provided.  */
  BOOL_BITFIELD explicit_int128_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
  /* True iff ds_thread is set for __thread, not thread_local.  */
  BOOL_BITFIELD gnu_thread_keyword_p : 1;
} cp_decl_specifier_seq;
/* The various kinds of declarators.  */
typedef enum cp_declarator_kind {
  cdk_id,        /* An identifier.  */
  cdk_function,  /* A function declarator.  */
  cdk_array,     /* An array declarator.  */
  cdk_pointer,   /* A pointer declarator.  */
  cdk_reference, /* A reference declarator.  */
  cdk_ptrmem,    /* A pointer-to-member declarator.  */
  cdk_error      /* An erroneous declarator.  */
} cp_declarator_kind;
/* A declarator.  */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL if none.  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is the first parameter in the list and the
     parameter sequence ends with an ellipsis.  */
  bool ellipsis_p;
};
/* A declarator.  */
struct cp_declarator {
  /* The kind of declarator.  */
  ENUM_BITFIELD (cp_declarator_kind) kind : 4;
  /* Whether we parsed an ellipsis (`...') just before the declarator,
     to indicate this is a parameter pack.  */
  BOOL_BITFIELD parameter_pack_p : 1;
  location_t id_loc; /* Currently only set for cdk_id and cdk_function.  */
  /* GNU Attributes that apply to this declarator.  If the declarator
     is a pointer or a reference, these attributes apply to the type
     pointed to.  */
  tree attributes;
  /* Standard C++11 attributes that apply to this declarator.  If the
     declarator is a pointer or a reference, these attributes apply
     to the pointer, rather than to the type pointed to.  */
  tree std_attributes;
  /* For all but cdk_id and cdk_error, the contained declarator.  For
     cdk_id and cdk_error, guaranteed to be NULL.  */
  cp_declarator *declarator;
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
         *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
         BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
         function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function as a TREE_LIST of decl/default.  */
      tree parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The virt-specifiers for the function.  */
      cp_virt_specifiers virt_specifiers;
      /* The ref-qualifier for the function.  */
      cp_ref_qualifier ref_qualifier;
      /* The exception-specification for the function.  */
      tree exception_specification;
      /* The late-specified return type, if any.  */
      tree late_return_type;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
    /* For cdk_reference */
    struct {
      /* The cv-qualifiers for the reference.  These qualifiers are
         only used to diagnose ill-formed code.  */
      cp_cv_quals qualifiers;
      /* Whether this is an rvalue reference */
      bool rvalue_ref;
    } reference;
  } u;
};
/* A level of template instantiation.  */
struct GTY((chain_next ("%h.next"))) tinst_level {
  /* The immediately deeper level in the chain.  */
  struct tinst_level *next;
  /* The original node.  Can be either a DECL (for a function or static
     data member) or a TYPE (for a class), depending on what we were
     asked to instantiate.  */
  tree decl;
  /* The location where the template is instantiated.  */
  location_t locus;
  /* errorcount+sorrycount when we pushed this level.  */
  int errors;
  /* True if the location is in a system header.  */
  bool in_system_header_p;
};
/* True iff the given decl-specifier-seq contains the indicated
   decl-specifier.  */
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE.  */
inline tree
type_of_this_parm (const_tree fntype)
{
  function_args_iterator it;
  /* Only member function types carry an implicit `this' parameter.  */
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  /* `this' is the first entry in the argument list, so the freshly
     initialized iterator already points at it.  */
  function_args_iter_init (&it, fntype);
  return function_args_iter_cond (&it);
}
/* Return the class of the `this' parameter of FNTYPE.  */
inline tree
class_of_this_parm (const_tree fntype)
{
  /* `this' is a pointer (or reference-like) type; strip one level to
     obtain the class it refers to.  */
  tree this_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_type);
}
/* A parameter list indicating for a function with no parameters,
   e.g  "int f(void)".  */
extern cp_parameter_declarator *no_parameters;
/* True if we saw "#pragma GCC java_exceptions".  */
extern bool pragma_java_exceptions;
/* in call.c */
extern bool check_dtor_name (tree, tree);
bool magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **, bool,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **, tree *,
tree *, tree, tree *,
tsubst_flags_t);
extern tree build_new_method_call (tree, tree, vec<tree, va_gc> **,
tree, int, tree *,
tsubst_flags_t);
extern tree build_special_member_call (tree, tree, vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_integral_nontype_arg_conv (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
#ifdef ENABLE_CHECKING
extern void validate_conversion_obstack (void);
#endif /* ENABLE_CHECKING */
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern void resort_type_method_vec (void *, void *,
gt_pointer_operator, void *);
extern bool add_method (tree, tree, tree);
extern bool currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void print_class_statistics (void);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool type_has_move_constructor (tree);
extern bool type_has_move_assign (tree);
extern bool type_has_user_declared_move_constructor (tree);
extern bool type_has_user_declared_move_assign(tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, int);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern void insert_late_enum_def_into_classtype_sorted_fields (tree, tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern tree perform_qualification_conversions (tree, tree);
/* in name-lookup.c */
extern tree pushdecl (tree);
extern tree pushdecl_maybe_friend (tree, bool);
extern void maybe_push_cleanup_level (tree);
extern tree pushtag (tree, tree, tag_scope);
extern tree make_anon_name (void);
extern tree pushdecl_top_level_maybe_friend (tree, bool);
extern tree pushdecl_top_level_and_finish (tree, tree);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, cp_cv_quals);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern int grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern bool xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (int);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
/* Callback type consumed by walk_namespaces (declared below): invoked with a
   namespace and the caller-supplied DATA pointer.  NOTE(review): the meaning
   of the int return value is not visible here — confirm in decl.c.  */
typedef int (*walk_namespaces_fn) (tree, void *);
extern int walk_namespaces (walk_namespaces_fn,
void *);
extern int wrapup_globals_for_namespace (tree, void *);
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern void require_deduced_type (tree);
extern bool defer_mark_used_calls;
extern GTY(()) vec<tree, va_gc> *deferred_mark_used_calls;
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
/* in decl2.c */
extern bool check_java_method (tree);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cp_write_global_declarations (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_comdat_fn (tree);
extern tree build_artificial_parm (tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern void init_error (void);
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void print_instantiation_context (void);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree,tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_rvalue_use (tree);
extern tree mark_lvalue_use (tree);
extern tree mark_type_use (tree);
extern void mark_exp_read (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree, enum overload_flags, bool);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern tree throw_bad_array_length (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree build_java_class_ref (tree);
extern tree integral_constant_value (tree);
extern tree decl_constant_value_safe (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree);
extern tree unqualified_fn_lookup_error (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern tree copy_decl (tree);
extern tree copy_type (tree);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree get_inherited_ctor (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* In optimize.c */
extern bool maybe_clone_body (tree);
/* in pt.c */
extern bool check_template_shadow (tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern tree check_explicit_specialization (tree, tree, int, int);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree do_auto_deduction (tree, tree, tree);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern bool is_auto_or_concept (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern int uses_template_parms (tree);
extern int uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern void maybe_instantiate_noexcept (tree);
extern tree instantiate_decl (tree, int, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree);
extern bool check_for_bare_parameter_packs (tree);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern int problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree fold_non_dependent_expr (tree);
extern tree fold_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern int push_tinst_level (tree);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_instantiation_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field_1 (tree, tree, bool);
extern tree lookup_field (tree, tree, int, bool);
extern int lookup_fnfields_1 (tree, tree);
extern tree lookup_fnfields_slot (tree, tree);
extern tree lookup_fnfields_slot_nolazy (tree, tree);
extern int class_method_index_for_fn (tree, tree);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern void print_search_statistics (void);
extern void reinit_search_statistics (void);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
/* Non-null sentinel tree value (never a real tree node); presumably returned
   by dfs_walk_all/dfs_walk_once callbacks (declared just below) to prune the
   walk of base binfos — confirm against search.c.  */
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
/* The representation of a deferred access check.  The GTY(()) marker exposes
   the struct's tree fields to GCC's garbage collector / PCH machinery; these
   records are stored in GC-allocated vectors (see the vec<deferred_access_check,
   va_gc> prototypes below).  */
typedef struct GTY(()) deferred_access_check {
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
/* The location of this access. */
location_t loc;
} deferred_access_check;
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t);
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern void finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_for_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool check_constexpr_ctor_body (tree, tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree);
extern tree maybe_constant_value (tree);
extern tree maybe_constant_init (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
/* Bitmask flags (values are distinct powers of two, so they may be OR-ed
   together); presumably passed as the "unsigned int" argument of
   begin_compound_stmt, declared just below — confirm in semantics.c.  */
enum {
BCS_NO_SCOPE = 1,   /* NOTE(review): name suggests the compound statement
                       introduces no new scope — confirm.  */
BCS_TRY_BLOCK = 2,  /* NOTE(review): name suggests the body of a try block.  */
BCS_FN_BODY = 4     /* NOTE(review): name suggests the outermost function
                       body block.  */
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern tree finish_parenthesized_expr (tree);
extern tree force_paren_expr (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern tree perform_koenig_lookup (tree, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree finish_increment_expr (tree, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern tree finish_unary_op_expr (location_t, enum tree_code, tree,
tsubst_flags_t);
extern tree finish_compound_literal (tree, tree, tsubst_flags_t);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern tree finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree);
extern tree finish_offsetof (tree);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern void note_decl_for_pch (tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern tree finish_omp_clauses (tree);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern tree build_capture_proxy (tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree);
extern tree maybe_resolve_dummy (tree);
extern tree nonlambda_method_basetype (void);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree, const_tree);
extern tree strip_typedefs (tree);
extern tree strip_typedefs_expr (tree);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool lvalue_or_rvalue_with_address_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
extern int is_overloaded_fn (tree);
extern tree dependent_name (tree);
extern tree get_fns (tree);
extern tree get_first_fn (tree);
extern tree ovl_cons (tree, tree);
extern tree build_overload (tree, tree);
extern tree ovl_scope (tree);
extern bool non_static_member_function_p (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool really_overloaded_fn (tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, struct pointer_set_t*);
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree fold_if_not_in_template (tree);
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
extern int type_unknown_p (const_tree);
enum { ce_derived, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree, tsubst_flags_t);
extern tree build_class_member_access_expr (tree, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (tree, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call (tree, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_addr_expr_strict (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, int,
tsubst_flags_t);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern tree build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (tree, enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_typed_address (tree, tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (tree, tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (const_tree, const_tree, diagnostic_t);
#undef cxx_incomplete_type_error
extern void cxx_incomplete_type_error (const_tree, const_tree);
#define cxx_incomplete_type_error(V,T) \
(cxx_incomplete_type_diagnostic ((V), (T), DK_ERROR))
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern void check_narrowing (tree, tree);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int);
extern tree digest_nsdmi_init (tree, tree);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree);
extern tree mangle_conv_op_name_for_type (tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree);
extern tree strip_using_decl (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In cp-cilkplus.c. */
extern bool cpp_validate_cilk_plus_loop (tree);
/* In cp/cp-array-notations.c */
extern tree expand_array_notation_exprs (tree);
bool cilkplus_an_triplet_types_ok_p (location_t, tree, tree, tree,
tree);
/* In c-family/cilk.c */
extern bool cilk_valid_spawn (tree);
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
DRB048-firstprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "omprace.h"
#include <omp.h>
/*
Example use of firstprivate()
*/
/*
 * Add the scalar g to each of the first n entries of a.
 * g is firstprivate: every thread receives its own copy, initialized from
 * the value g holds on entry to the parallel region, so the loop is
 * race-free (this is the "no race" variant of the benchmark).
 */
void foo(int * a, int n, int g)
{
#pragma omp parallel for firstprivate (g)
  for (int k = 0; k < n; k++)
    a[k] += g;
}
int a[100];
/*
 * Driver: bring up the race-detection harness, run the race-free
 * firstprivate example over the 100-element global array, shut down.
 * omprace_init()/omprace_fini() come from omprace.h.
 */
int main()
{
  omprace_init();
  foo(a, 100, 7);   /* add 7 to every element of the global array */
  omprace_fini();
  return 0;
}
|
matMult_SSE.c | /*
* File: matMult.c
* Author: Malcolm Davis
* Course: Computer Architecture II
* Created on Apr 20, 2018
* 4x4 matrix multiplication
*
* Usage:
* ./matMult for default parameters and random matrixes or;
* ./matMult v1.1.1 v1.1.2 ... v1.1.4 ... v1.2.1 v1.2.2 ... v1.2.4 ... v2.4.1 v2.4.2 ... v2.4.4
*/
#include "xmmintrin.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
/* Print the command-line usage message to stdout.
   (The string uses a backslash line continuation, so both physical lines
   form one literal.) */
void usage(){
  printf("Usage:\n ./matMult for default parameters and random matrixes or;\n\
./matMult v1.1.1 v1.1.2 ... v1.1.4 ... v1.2.1 v1.2.2 ... v1.2.4 ... v2.4.1 v2.4.2 ... v2.4.4 \n");
}
/*
* Prints a __m128i vector on console
* @param v the vector to print
*/
/*
 * Print the four 32-bit lanes of a 128-bit vector, tab-separated,
 * followed by a newline.
 * NOTE(review): the parameter is declared __m128i (integer lanes) but the
 * contents are printed as floats; callers appear to pass float data --
 * confirm before relying on this signature.
 * @param v the vector to print
 */
void printVector(__m128i* v){
  const float * lanes = (const float*)v;
  for (int i = 0; i < 4; ++i)
  {
    printf("%f\t", lanes[i]);
  }
  printf("\n");
}
/*
* Prints a 4x4 float matrix on console
* @param matrix pointer to the matrix to print
*/
/*
 * Print a 4x4 float matrix: four tab-separated values per row, one row
 * per line, followed by a trailing blank line.
 * @param matrix pointer to 16 consecutive floats in row-major order
 */
void prinMatrix(float* matrix){
  for (int row = 0; row < 4; ++row)
  {
    for (int col = 0; col < 4; ++col)
    {
      printf("%f\t", matrix[4 * row + col]);
    }
    printf("\n");   /* end of row */
  }
  printf("\n");
}
/*
* Transpose a 4x4 float matrix
* @param src the one to transpose
* @param out the one with the transpose result
*/
/*
 * Transpose a 4x4 float matrix stored in row-major order.
 * @param src the matrix to transpose (16 floats, left unmodified)
 * @param dst receives the transpose: dst[r*4+c] = src[c*4+r]
 *
 * Fixes: the original wrote a stray newline to stdout (debug leftover --
 * a pure utility should not print) and routed the row index through
 * floor() even though i/4 is already exact integer division.
 */
void transposeMatrix(float* src, float* dst){
    for (int i = 0; i < 16; i++)
    {
        /* i/4 is the destination row, i%4 the destination column. */
        dst[i] = src[(i / 4) + 4 * (i % 4)];
    }
}
/*
* Main method, retrive command line options, and multiplies the matrixes
*/
/*
 * Entry point: obtain two 4x4 float matrices (random, or from exactly 32
 * command-line values), multiply them with SSE, and print the result.
 * Returns 0 on success, -1 on a bad argument count.
 *
 * Fixes: removed the dead matrix1/matrix2 loads (assigned, never read,
 * and a potentially misaligned __m128 load); removed the OpenMP pragma
 * from the rand() loop (rand() is not required to be thread-safe, so the
 * parallel fill was a data race); replaced floor(i/4) with plain integer
 * division; added an explicit return 0.
 */
int main(int argc, char ** argv){
    /* 32 matrix entries (16 per matrix) + program name = 33 arguments. */
    if (argc != 33 && argc != 1){
        usage();
        return -1;
    }
    __m128 tmpRes, tmpV1, tmpV2;
    /* NOTE(review): the __m128 loads below assume these static arrays are
       16-byte aligned; most compilers align them so on SSE targets, but an
       explicit alignment specifier would be safer -- confirm. */
    static float m1[16], m2[16], m2t[16], result[16], *tmpPointer;
    if(argc == 1){
        srand (time(NULL));
        /* No arguments: fill both matrices with random values.
           Kept serial: rand() shares hidden state and is not thread-safe. */
        for (int i = 0; i < 16; ++i)
        {
            m1[i] = rand();
            m2[i] = rand();
        }
    } else{
        /* Parse the 32 values: argv[1..16] -> m1, argv[17..32] -> m2. */
        #pragma omp parallel for
        for (int i = 0; i < 16; ++i)
        {
            m1[i] = atof(argv[i + 1]);
            m2[i] = atof(argv[i + 17]);
        }
    }
    printf("Matrix 1: \n");
    prinMatrix(m1);
    printf("Matrix 2: \n");
    prinMatrix(m2);
    /* Transpose m2 so each result entry is a dot product of two rows. */
    transposeMatrix(m2, m2t);
    #pragma omp parallel for private(tmpV1, tmpV2, tmpPointer, tmpRes)
    for (int i = 0; i < 16; ++i)
    {
        tmpV1 = *(((__m128*)m1) + i / 4);    /* row i/4 of m1 */
        tmpV2 = *(((__m128*)m2t) + i % 4);   /* row i%4 of m2t == col of m2 */
        tmpRes = _mm_mul_ps(tmpV1, tmpV2);
        tmpPointer = (float*)&tmpRes;
        result[i] = 0;
        /* Horizontal sum of the four lane-wise products. */
        for (int j = 0; j < 4; ++j)
        {
            result[i] += *(tmpPointer++);
        }
    }
    printf("Result *********************** \n");
    printf("Result: \n");
    prinMatrix(result);
    return 0;
}
GB_binop__plus_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int32)
// A*D function (colscale): GB (_AxD__plus_int32)
// D*A function (rowscale): GB (_DxB__plus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int32)
// C=scalar+B GB (_bind1st__plus_int32)
// C=scalar+B' GB (_bind1st_tran__plus_int32)
// C=A+scalar GB (_bind2nd__plus_int32)
// C=A'+scalar GB (_bind2nd_tran__plus_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A and B are all dense; the PLUS op is applied
// elementwise by the included template, using nthreads threads.
// Returns void: the GB_DISABLE check is performed by the caller for
// this kernel.
void GB (_Cdense_ewise3_accum__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulation.
// Returns GrB_NO_VALUE when this operator/type combination is compiled
// out (GB_DISABLE), so the caller can fall back to the generic kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C, using
// the B_ek_slicing task decomposition computed by the caller.
GrB_Info GB (_Cdense_accumB__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into every entry of the dense matrix C.
// p_bwork points at the scalar, passed as untyped GB_void and cast to the
// operator's int32_t type here.
// Fix: the original had a second, unreachable "return (GrB_SUCCESS)"
// after the braced block that already returns; the dead statement has
// been removed.
GrB_Info GB (_Cdense_accumb__plus_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column of A by the corresponding diagonal entry
// of D, writing the result into C.
GrB_Info GB (_AxD__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes the scaled values directly into C->x
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row of B by the corresponding diagonal entry of D,
// writing the result into C.
GrB_Info GB (_DxB__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the template writes the scaled values directly into C->x
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the result pattern is the union of
// the patterns of A and B.  The C_to_* maps and TaskList describe the
// task decomposition computed by the caller.
GrB_Info GB (_AaddB__plus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces the add template may allocate; GB_FREE_WORK releases them
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B; the result pattern is the
// intersection of the patterns of A and B.
GrB_Info GB (_AemultB_01__plus_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// For this operator GB_BINOP_FLIP is 0 (PLUS is commutative), so only
// the non-flipped branch below is compiled.
GrB_Info GB (_AemultB_02__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B when M is sparse/hyper and both A and B are
// bitmap/full; iterates over the entries of the mask M.
GrB_Info GB (_AemultB_03__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is
// held in bitmap form.
GrB_Info GB (_AemultB_bitmap__plus_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B (bind the scalar x as
// the first operand).  Bb is B's bitmap (or NULL when B is full); entries
// absent from B are skipped.  Cx and Bx may be aliased.
GrB_Info GB (_bind1st__plus_int32)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *cx = (int32_t *) Cx_output ;
    const int32_t xval = *((const int32_t *) x_input) ;
    const int32_t *bx = (const int32_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Bb, k))
        {
            cx [k] = xval + bx [k] ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (bind the scalar y as
// the second operand).  Ab is A's bitmap (or NULL when A is full); entries
// absent from A are skipped.  Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__plus_int32)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *cx = (int32_t *) Cx_output ;
    const int32_t *ax = (const int32_t *) Ax_input ;
    const int32_t yval = *((const int32_t *) y_input) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        if (GBB (Ab, k))
        {
            cx [k] = ax [k] + yval ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply cij = x + aij via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB (_bind1st_tran__plus_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code compiled after this function
    #undef GB_ATYPE
    #define GB_ATYPE \
        int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply cij = aij + y via the GB_CAST_OP
// macro defined just above this function.
GrB_Info GB (_bind2nd_tran__plus_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
two_step_v_p_strategy.h | //
// Project Name: KratosPFEMFluidDynamicsApplication $
// Last modified by: $Author: AFranci $
// Date: $Date: January 2016 $
// Revision: $Revision: 0.0 $
//
//
#ifndef KRATOS_TWO_STEP_V_P_STRATEGY_H
#define KRATOS_TWO_STEP_V_P_STRATEGY_H
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/deprecated_variables.h"
#include "includes/cfd_variables.h"
#include "utilities/openmp_utils.h"
#include "processes/process.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "custom_utilities/mesher_utilities.hpp"
#include "custom_utilities/boundary_normals_calculation_utilities.hpp"
#include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h"
/* #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h"
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h"
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
#include "custom_utilities/solver_settings.h"
#include "custom_strategies/strategies/gauss_seidel_linear_strategy.h"
#include "pfem_fluid_dynamics_application_variables.h"
#include <stdio.h>
#include <math.h>
namespace Kratos
{
///@addtogroup PFEMFluidDynamicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
template <class TSparseSpace,
class TDenseSpace,
class TLinearSolver>
class TwoStepVPStrategy : public SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(TwoStepVPStrategy);
/// Counted pointer of TwoStepVPStrategy
//typedef boost::shared_ptr< TwoStepVPStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TDataType TDataType;
//typedef typename BaseType::DofSetType DofSetType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer StrategyPointerType;
typedef TwoStepVPSolverSettings<TSparseSpace, TDenseSpace, TLinearSolver> SolverSettingsType;
///@}
///@name Life Cycle
///@{
/// Constructor from a solver-settings object.
/// All configuration (linear solvers, tolerances, time order, ...) is taken
/// from rSolverConfig and applied by InitializeStrategy (defined elsewhere
/// in this class).
TwoStepVPStrategy(ModelPart &rModelPart,
SolverSettingsType &rSolverConfig) : BaseType(rModelPart)
{
InitializeStrategy(rSolverConfig);
}
/// Explicit constructor.
/// Builds the two auxiliary Gauss-Seidel linear strategies used by the
/// fractional-step scheme: one for the momentum (velocity) system and one
/// for the continuity (pressure) system.
/// @param rModelPart             Fluid model part to solve on.
/// @param pVelocityLinearSolver  Linear solver for the momentum system.
/// @param pPressureLinearSolver  Linear solver for the pressure system.
/// @param ReformDofSet           If true, Clear() is called after each solution step.
/// @param VelTol                 Relative velocity convergence tolerance.
/// @param PresTol                Relative pressure convergence tolerance.
/// @param MaxPressureIterations  Maximum predictor-corrector iterations.
/// @param TimeOrder              1 = backward Euler, 2 = BDF2.
/// @param DomainSize             Spatial dimension (2 or 3).
TwoStepVPStrategy(ModelPart &rModelPart,
/*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/
typename TLinearSolver::Pointer pVelocityLinearSolver,
typename TLinearSolver::Pointer pPressureLinearSolver,
bool ReformDofSet = true,
double VelTol = 0.0001,
double PresTol = 0.0001,
int MaxPressureIterations = 1, // Only for predictor-corrector
unsigned int TimeOrder = 2,
unsigned int DomainSize = 2) : BaseType(rModelPart), // Move Mesh flag, pass as input?
mVelocityTolerance(VelTol),
mPressureTolerance(PresTol),
mMaxPressureIter(MaxPressureIterations),
mDomainSize(DomainSize),
mTimeOrder(TimeOrder),
mReformDofSet(ReformDofSet)
{
KRATOS_TRY;
BaseType::SetEchoLevel(1);
// Check that input parameters are reasonable and sufficient.
this->Check();
bool CalculateNormDxFlag = true;
bool ReformDofAtEachIteration = false; // DofSet modification is managed by the fractional step strategy; auxiliary strategies should not modify the DofSet directly.
// Additional Typedefs
typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer;
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
//initializing fractional velocity solution step
typedef Scheme<TSparseSpace, TDenseSpace> SchemeType;
typename SchemeType::Pointer pScheme;
// A static (non-dynamic) incremental-update scheme: the time discretization
// is handled by the elements through BDF_COEFFICIENTS, not by the scheme.
typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme<TSparseSpace, TDenseSpace>());
pScheme.swap(Temp);
//CONSTRUCTION OF VELOCITY STRATEGY (elimination builder-and-solver)
BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pVelocityLinearSolver));
/* BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); */
this->mpMomentumStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pVelocityLinearSolver, vel_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpMomentumStrategy->SetEchoLevel(BaseType::GetEchoLevel());
vel_build->SetCalculateReactionsFlag(false);
// CONSTRUCTION OF PRESSURE STRATEGY (block builder-and-solver)
/* BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); */
BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer(new ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pPressureLinearSolver));
this->mpPressureStrategy = typename BaseType::Pointer(new GaussSeidelLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pPressureLinearSolver, pressure_build, ReformDofAtEachIteration, CalculateNormDxFlag));
this->mpPressureStrategy->SetEchoLevel(BaseType::GetEchoLevel());
pressure_build->SetCalculateReactionsFlag(false);
KRATOS_CATCH("");
}
/// Destructor. Empty: the auxiliary strategies are presumably held by
/// smart pointers and released automatically — confirm against the member
/// declarations further down the file.
virtual ~TwoStepVPStrategy() {}
/// Verify that the required variables are registered and that the model
/// part buffer is deep enough for the selected time order, then run the
/// base-class and per-element checks.
/// @return 0 on success, otherwise the first non-zero error code returned
///         by the base class or an element.
int Check() override
{
KRATOS_TRY;
// Check elements and conditions in the model part
int ierr = BaseType::Check();
if (ierr != 0)
return ierr;
// A Key of 0 means the variable was never registered by the application.
if (DELTA_TIME.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "DELTA_TIME Key is 0. Check that the application was correctly registered.", "");
if (BDF_COEFFICIENTS.Key() == 0)
KRATOS_THROW_ERROR(std::runtime_error, "BDF_COEFFICIENTS Key is 0. Check that the application was correctly registered.", "");
ModelPart &rModelPart = BaseType::GetModelPart();
// BDF2 needs three history steps; backward Euler needs two.
if (mTimeOrder == 2 && rModelPart.GetBufferSize() < 3)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (BDF2), needed 3, got ", rModelPart.GetBufferSize());
if (mTimeOrder == 1 && rModelPart.GetBufferSize() < 2)
KRATOS_THROW_ERROR(std::invalid_argument, "Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ", rModelPart.GetBufferSize());
// const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
// for (ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl)
// {
//    ierr = itEl->Check(rCurrentProcessInfo);
//    if (ierr != 0)
//      break;
// }
// Stop at the first element reporting an error.
const auto &r_current_process_info = rModelPart.GetProcessInfo();
for (const auto &r_element : rModelPart.Elements())
{
ierr = r_element.Check(r_current_process_info);
if (ierr != 0)
{
break;
}
}
/* for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) */
/* { */
/* ierr = itCond->Check(rCurrentProcessInfo); */
/* if (ierr != 0) break; */
/* } */
return ierr;
KRATOS_CATCH("");
}
/// Solve one time step of the two-step velocity-pressure (fractional step)
/// problem: iterate momentum and continuity solves until both converge
/// (with at least 3 iterations) or the iteration budget is exhausted.
/// The iteration budget is enlarged during the first 20 steps and after a
/// time-step-size change.
/// @return true if both momentum and continuity converged.
bool SolveSolutionStep() override
{
ModelPart &rModelPart = BaseType::GetModelPart();
// Recompute BDF coefficients for the (possibly changed) time step size.
this->SetTimeCoefficients(rModelPart.GetProcessInfo());
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
double currentTime = rCurrentProcessInfo[TIME];
double timeInterval = rCurrentProcessInfo[DELTA_TIME];
bool timeIntervalChanged = rCurrentProcessInfo[TIME_INTERVAL_CHANGED];
unsigned int stepsWithChangedDt = rCurrentProcessInfo[STEPS_WITH_CHANGED_DT];
bool converged = false;
unsigned int maxNonLinearIterations = mMaxPressureIter;
KRATOS_INFO("\n Solution with two_step_vp_strategy at t=") << currentTime << "s" << std::endl;
// Extra iterations when the time step was recently adapted...
if ((timeIntervalChanged == true && currentTime > 10 * timeInterval) || stepsWithChangedDt > 0)
{
maxNonLinearIterations *= 2;
}
// ...and during the startup phase of the simulation.
if (currentTime < 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the first 10 time steps, I consider the given iteration number x3" << std::endl;
maxNonLinearIterations *= 3;
}
if (currentTime < 20 * timeInterval && currentTime >= 10 * timeInterval)
{
if (BaseType::GetEchoLevel() > 1)
std::cout << "within the second 10 time steps, I consider the given iteration number x2" << std::endl;
maxNonLinearIterations *= 2;
}
bool momentumConverged = true;
bool continuityConverged = false;
bool fixedTimeStep = false;
double pressureNorm = 0;
double velocityNorm = 0;
// Flag degenerate (flat/isolated) elements before iterating.
this->SetBlockedFlag();
for (unsigned int it = 0; it < maxNonLinearIterations; ++it)
{
// 1) momentum solve, 2) mesh/topology update, 3) continuity solve.
momentumConverged = this->SolveMomentumIteration(it, maxNonLinearIterations, fixedTimeStep, velocityNorm);
this->UpdateTopology(rModelPart, BaseType::GetEchoLevel());
if (fixedTimeStep == false)
{
continuityConverged = this->SolveContinuityIteration(it, maxNonLinearIterations, pressureNorm);
}
// Stress/strain state is updated on the last iteration or on convergence.
if (it == maxNonLinearIterations - 1 || ((continuityConverged && momentumConverged) && it > 2))
{
this->UpdateStressStrain();
}
// Require at least 3 iterations before accepting convergence.
if ((continuityConverged && momentumConverged) && it > 2)
{
rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
converged = true;
KRATOS_INFO("TwoStepVPStrategy") << "V-P strategy converged in " << it + 1 << " iterations." << std::endl;
break;
}
// A "fixed" (reduced) time step aborts the current iteration loop.
if (fixedTimeStep == true)
{
break;
}
}
if (!continuityConverged && !momentumConverged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Convergence tolerance not reached." << std::endl;
if (mReformDofSet)
this->Clear();
return converged;
}
/// Intentionally empty: all end-of-step work is performed inside
/// SolveSolutionStep (UpdateStressStrain / Clear).
void FinalizeSolutionStep() override
{
}
/// Intentionally empty: the auxiliary strategies are initialized lazily on
/// the first iteration of SolveMomentumIteration / SolveContinuityIteration.
void InitializeSolutionStep() override
{
}
/// Update nodal displacements from the latest velocities and move the mesh.
/// NOTE: rModelPart and echoLevel are currently unused (they fed the
/// commented-out boundary-normal computation below); kept for interface
/// compatibility.
void UpdateTopology(ModelPart &rModelPart, unsigned int echoLevel)
{
KRATOS_TRY;
this->CalculateDisplacementsAndPorosity();
BaseType::MoveMesh();
/* BoundaryNormalsCalculationUtilities BoundaryComputation; */
/* BoundaryComputation.CalculateWeightedBoundaryNormals(rModelPart, echoLevel); */
KRATOS_CATCH("");
}
/// Classify elements before the solution loop:
///  - BLOCKED: 3D free-surface tetrahedra that are nearly flat (two faces
///    almost coplanar), with progressively stricter coplanarity thresholds
///    the fewer isolated nodes they contain.
///  - ISOLATED: free-surface elements with no rigid nodes whose nodes are
///    (almost) all isolated (each belonging to a single element).
/// Runs in parallel over an OpenMP partition of the element container.
void SetBlockedFlag()
{
KRATOS_TRY;
ModelPart &rModelPart = BaseType::GetModelPart();
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
unsigned int numNodes = itElem->GetGeometry().size();
std::vector<array_1d<double, 3>> nodesCoordinates;
nodesCoordinates.resize(numNodes);
// Reset flags; they are recomputed from scratch each call.
(itElem)->Set(BLOCKED, false);
(itElem)->Set(ISOLATED, false);
unsigned int freeSurfaceNodes = 0;
unsigned int freeSurfaceRigidNodes = 0;
unsigned int rigidNodes = 0;
unsigned int isolatedNodes = 0;
// Count node categories and collect coordinates.
for (unsigned int i = 0; i < numNodes; i++)
{
if (itElem->GetGeometry()[i].Is(FREE_SURFACE))
{
freeSurfaceNodes++;
if (itElem->GetGeometry()[i].Is(RIGID))
{
freeSurfaceRigidNodes++;
}
}
else if (itElem->GetGeometry()[i].Is(RIGID))
{
rigidNodes++;
}
nodesCoordinates[i] = itElem->GetGeometry()[i].Coordinates();
// A node attached to a single element counts as isolated.
ElementWeakPtrVectorType &neighb_elems = itElem->GetGeometry()[i].GetValue(NEIGHBOUR_ELEMENTS);
if (neighb_elems.size() == 1)
{
isolatedNodes++;
}
}
// if (dimension == 3 && (freeSurfaceNodes == numNodes || (freeSurfaceNodes + rigidNodes) == numNodes))
// 3D flatness test: compare the normals of the four triangular faces
// of the tetrahedron (assumes 4-node elements in 3D).
if (dimension == 3)
{
double a1 = 0; //normal component x for the first triangular face of the tetrahedron (nodes A,B,C)
double b1 = 0; //normal component y for the first triangular face of the tetrahedron (nodes A,B,C)
double c1 = 0; //normal component z for the first triangular face of the tetrahedron (nodes A,B,C)
a1 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[2][2] - nodesCoordinates[0][2]) - (nodesCoordinates[2][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b1 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[2][0] - nodesCoordinates[0][0]) - (nodesCoordinates[2][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c1 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[2][1] - nodesCoordinates[0][1]) - (nodesCoordinates[2][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a2 = 0; //normal component x for the second triangular face of the tetrahedron (nodes A,B,D)
double b2 = 0; //normal component y for the second triangular face of the tetrahedron (nodes A,B,D)
double c2 = 0; //normal component z for the second triangular face of the tetrahedron (nodes A,B,D)
a2 = (nodesCoordinates[1][1] - nodesCoordinates[0][1]) * (nodesCoordinates[3][2] - nodesCoordinates[0][2]) - (nodesCoordinates[3][1] - nodesCoordinates[0][1]) * (nodesCoordinates[1][2] - nodesCoordinates[0][2]);
b2 = (nodesCoordinates[1][2] - nodesCoordinates[0][2]) * (nodesCoordinates[3][0] - nodesCoordinates[0][0]) - (nodesCoordinates[3][2] - nodesCoordinates[0][2]) * (nodesCoordinates[1][0] - nodesCoordinates[0][0]);
c2 = (nodesCoordinates[1][0] - nodesCoordinates[0][0]) * (nodesCoordinates[3][1] - nodesCoordinates[0][1]) - (nodesCoordinates[3][0] - nodesCoordinates[0][0]) * (nodesCoordinates[1][1] - nodesCoordinates[0][1]);
double a3 = 0; //normal component x for the third triangular face of the tetrahedron (nodes B,C,D)
double b3 = 0; //normal component y for the third triangular face of the tetrahedron (nodes B,C,D)
double c3 = 0; //normal component z for the third triangular face of the tetrahedron (nodes B,C,D)
a3 = (nodesCoordinates[1][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[1][2] - nodesCoordinates[2][2]);
b3 = (nodesCoordinates[1][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[1][0] - nodesCoordinates[2][0]);
c3 = (nodesCoordinates[1][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[1][1] - nodesCoordinates[2][1]);
double a4 = 0; //normal component x for the fourth triangular face of the tetrahedron (nodes A,C,D)
double b4 = 0; //normal component y for the fourth triangular face of the tetrahedron (nodes A,C,D)
double c4 = 0; //normal component z for the fourth triangular face of the tetrahedron (nodes A,C,D)
a4 = (nodesCoordinates[0][1] - nodesCoordinates[2][1]) * (nodesCoordinates[3][2] - nodesCoordinates[2][2]) - (nodesCoordinates[3][1] - nodesCoordinates[2][1]) * (nodesCoordinates[0][2] - nodesCoordinates[2][2]);
b4 = (nodesCoordinates[0][2] - nodesCoordinates[2][2]) * (nodesCoordinates[3][0] - nodesCoordinates[2][0]) - (nodesCoordinates[3][2] - nodesCoordinates[2][2]) * (nodesCoordinates[0][0] - nodesCoordinates[2][0]);
c4 = (nodesCoordinates[0][0] - nodesCoordinates[2][0]) * (nodesCoordinates[3][1] - nodesCoordinates[2][1]) - (nodesCoordinates[3][0] - nodesCoordinates[2][0]) * (nodesCoordinates[0][1] - nodesCoordinates[2][1]);
// Cosine of the angle between each pair of face normals:
// |cos| -> 1 means the two faces are nearly coplanar (sliver shape).
double cosAngle12 = (a1 * a2 + b1 * b2 + c1 * c2) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle13 = (a1 * a3 + b1 * b3 + c1 * c3) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
double cosAngle14 = (a1 * a4 + b1 * b4 + c1 * c4) / (sqrt(pow(a1, 2) + pow(b1, 2) + pow(c1, 2)) * sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)));
double cosAngle23 = (a3 * a2 + b3 * b2 + c3 * c2) / (sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle24 = (a4 * a2 + b4 * b2 + c4 * c2) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a2, 2) + pow(b2, 2) + pow(c2, 2)));
double cosAngle34 = (a4 * a3 + b4 * b3 + c4 * c3) / (sqrt(pow(a4, 2) + pow(b4, 2) + pow(c4, 2)) * sqrt(pow(a3, 2) + pow(b3, 2) + pow(c3, 2)));
// Thresholds get stricter as the number of isolated nodes decreases.
if ((fabs(cosAngle12) > 0.99 || fabs(cosAngle13) > 0.99 || fabs(cosAngle14) > 0.99 || fabs(cosAngle23) > 0.99 || fabs(cosAngle24) > 0.99 || fabs(cosAngle34) > 0.99) && (freeSurfaceNodes == numNodes) && isolatedNodes > 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.995 || fabs(cosAngle13) > 0.995 || fabs(cosAngle14) > 0.995 || fabs(cosAngle23) > 0.995 || fabs(cosAngle24) > 0.995 || fabs(cosAngle34) > 0.995) && (freeSurfaceNodes == numNodes) && isolatedNodes == 1)
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
else if ((fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999) && (freeSurfaceNodes == numNodes))
{
(itElem)->Set(BLOCKED, true);
// std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
}
// else if (fabs(cosAngle12) > 0.999 || fabs(cosAngle13) > 0.999 || fabs(cosAngle14) > 0.999 || fabs(cosAngle23) > 0.999 || fabs(cosAngle24) > 0.999 || fabs(cosAngle34) > 0.999)
// {
//   (itElem)->Set(BLOCKED, true);
//   // std::cout << "in the strategy BLOCKED ELEMENT: " << (itElem)->Id() << std::endl;
// }
}
// ISOLATED overrides BLOCKED: fully free-surface, non-rigid elements
// whose nodes are (almost) all single-element nodes.
if (freeSurfaceNodes == numNodes && rigidNodes == 0 && isolatedNodes >= (numNodes - 1))
{
(itElem)->Set(ISOLATED, true);
(itElem)->Set(BLOCKED, false);
}
}
}
KRATOS_CATCH("");
}
/// Deactivate "sliver" elements: full-order simplices (triangle in 2D,
/// tetrahedron in 3D) whose measure (area / volume) is below 0.1% of the
/// mean elemental volume of the model part. Deactivated elements are
/// excluded from assembly (ACTIVE = false); all other simplices are
/// re-activated. Runs in parallel over an OpenMP element partition.
void UnactiveSliverElements()
{
    KRATOS_TRY;
    ModelPart &rModelPart = BaseType::GetModelPart();
    const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
    MesherUtilities MesherUtils;
    double ModelPartVolume = MesherUtils.ComputeModelPartVolume(rModelPart);
    // Threshold: 1/1000 of the mean elemental volume.
    double CriticalVolume = 0.001 * ModelPartVolume / double(rModelPart.Elements().size());
#pragma omp parallel
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            unsigned int numNodes = itElem->GetGeometry().size();
            // Only plain simplices are checked; higher-order elements are skipped.
            if (numNodes == (dimension + 1))
            {
                // BUGFIX: ElementalVolume used to be declared before the
                // #pragma omp parallel region, making it shared between all
                // threads (a data race under OpenMP default data-sharing).
                // It is now private to each iteration.
                double ElementalVolume = 0;
                if (dimension == 2)
                {
                    ElementalVolume = (itElem)->GetGeometry().Area();
                }
                else if (dimension == 3)
                {
                    ElementalVolume = (itElem)->GetGeometry().Volume();
                }
                if (ElementalVolume < CriticalVolume)
                {
                    // std::cout << "sliver element: it has Volume: " << ElementalVolume << " vs CriticalVolume(meanVol/1000): " << CriticalVolume<< std::endl;
                    (itElem)->Set(ACTIVE, false);
                }
                else
                {
                    (itElem)->Set(ACTIVE, true);
                }
            }
        }
    }
    KRATOS_CATCH("");
}
void CalculatePressureVelocity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
}
else
{
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
}
}
}
void CalculatePressureAcceleration()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
if (timeStep == 1)
{
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &PreviousPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = (CurrentPressureVelocity - PreviousPressureVelocity) / timeInterval;
}
}
}
/// Update all nodal time derivatives at the end of a step:
///  - ACCELERATION via the BDF formula (regular fluid/solid nodes),
///  - zero acceleration on rigid nodes,
///  - reset pressure state on isolated nodes (only gravity drives them),
///  - PRESSURE_VELOCITY / PRESSURE_ACCELERATION by backward differences.
virtual void CalculateTemporalVariables()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular node: BDF update of the acceleration.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
// Rigid (non-solid) node: acceleration history forced to zero.
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
// Isolated node: clear pressure state and apply free-fall kinematics.
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
const double timeInterval = rCurrentProcessInfo[DELTA_TIME];
unsigned int timeStep = rCurrentProcessInfo[STEP];
if (timeStep == 1)
{
// First step: no usable pressure history, zero both buffer entries.
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0;
}
else
{
// Order matters here: the acceleration is built from the OLD
// pressure-velocity value before that value is overwritten, i.e.
// d2p/dt2 = (dp/dt_new - dp/dt_old) / dt, accumulated in two steps.
double &CurrentPressure = (i)->FastGetSolutionStepValue(PRESSURE, 0);
double &PreviousPressure = (i)->FastGetSolutionStepValue(PRESSURE, 1);
double &CurrentPressureVelocity = (i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0);
double &CurrentPressureAcceleration = (i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0);
CurrentPressureAcceleration = CurrentPressureVelocity / timeInterval;
CurrentPressureVelocity = (CurrentPressure - PreviousPressure) / timeInterval;
CurrentPressureAcceleration += -CurrentPressureVelocity / timeInterval;
}
}
}
/// Update nodal accelerations only (no pressure derivatives).
/// NOTE(review): this duplicates the first half of
/// CalculateTemporalVariables; consider consolidating the shared logic.
void CalculateAccelerations()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 0);
array_1d<double, 3> &PreviousAcceleration = (i)->FastGetSolutionStepValue(ACCELERATION, 1);
/* if((i)->IsNot(ISOLATED) || (i)->Is(SOLID)){ */
// Regular node: BDF update of the acceleration.
if ((i)->IsNot(ISOLATED) && ((i)->IsNot(RIGID) || (i)->Is(SOLID)))
{
UpdateAccelerations(CurrentAcceleration, CurrentVelocity, PreviousAcceleration, PreviousVelocity, BDFcoeffs);
}
// Rigid (non-solid) node: acceleration history forced to zero.
else if ((i)->Is(RIGID))
{
array_1d<double, 3> Zeros(3, 0.0);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = Zeros;
(i)->FastGetSolutionStepValue(ACCELERATION, 1) = Zeros;
}
// Isolated node: clear pressure state and apply free-fall kinematics.
else
{
(i)->FastGetSolutionStepValue(PRESSURE, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_VELOCITY, 1) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 0) = 0.0;
(i)->FastGetSolutionStepValue(PRESSURE_ACCELERATION, 1) = 0.0;
if ((i)->SolutionStepsDataHas(VOLUME_ACCELERATION))
{
array_1d<double, 3> &VolumeAcceleration = (i)->FastGetSolutionStepValue(VOLUME_ACCELERATION);
(i)->FastGetSolutionStepValue(ACCELERATION, 0) = VolumeAcceleration;
(i)->FastGetSolutionStepValue(VELOCITY, 0) += VolumeAcceleration * rCurrentProcessInfo[DELTA_TIME];
}
}
}
}
/// BDF acceleration update:
///   a^{n+1} = -c1 * (v^{n+1} - v^n) - a^n
/// where c1 = BDFcoeffs[1] (the coefficient of step n, negative by
/// construction in SetTimeCoefficients).
inline void UpdateAccelerations(array_1d<double, 3> &CurrentAcceleration,
                                const array_1d<double, 3> &CurrentVelocity,
                                array_1d<double, 3> &PreviousAcceleration,
                                const array_1d<double, 3> &PreviousVelocity,
                                Vector &BDFcoeffs)
{
    const array_1d<double, 3> velocityIncrement = CurrentVelocity - PreviousVelocity;
    noalias(CurrentAcceleration) = -BDFcoeffs[1] * velocityIncrement - PreviousAcceleration;
}
virtual void CalculateDisplacementsAndPorosity()
{
ModelPart &rModelPart = BaseType::GetModelPart();
ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double TimeStep = rCurrentProcessInfo[DELTA_TIME];
for (ModelPart::NodeIterator i = rModelPart.NodesBegin();
i != rModelPart.NodesEnd(); ++i)
{
array_1d<double, 3> &CurrentVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 0);
array_1d<double, 3> &PreviousVelocity = (i)->FastGetSolutionStepValue(VELOCITY, 1);
array_1d<double, 3> &CurrentDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 0);
array_1d<double, 3> &PreviousDisplacement = (i)->FastGetSolutionStepValue(DISPLACEMENT, 1);
/* if( i->IsFixed(DISPLACEMENT_X) == false ) */
CurrentDisplacement[0] = 0.5 * TimeStep * (CurrentVelocity[0] + PreviousVelocity[0]) + PreviousDisplacement[0];
/* if( i->IsFixed(DISPLACEMENT_Y) == false ) */
CurrentDisplacement[1] = 0.5 * TimeStep * (CurrentVelocity[1] + PreviousVelocity[1]) + PreviousDisplacement[1];
/* if( i->IsFixed(DISPLACEMENT_Z) == false ) */
CurrentDisplacement[2] = 0.5 * TimeStep * (CurrentVelocity[2] + PreviousVelocity[2]) + PreviousDisplacement[2];
// currentFluidFractionRate = (currentFluidFraction - previousFluidFraction)/TimeStep;
}
}
/// Re-initialize the elemental stress/strain state (via each element's
/// InitializeSolutionStep) in parallel, then refresh all nodal time
/// derivatives through CalculateTemporalVariables.
void UpdateStressStrain()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
/* itElem-> InitializeElementStrainStressState(); */
itElem->InitializeSolutionStep(rCurrentProcessInfo);
}
}
/* this->CalculateAccelerations(); */
/* this->CalculatePressureVelocity(); */
/* this->CalculatePressureAcceleration(); */
// Single call replacing the three commented-out ones above.
this->CalculateTemporalVariables();
}
/// Release the internal data (system matrices, DoF sets) of both auxiliary
/// strategies; called after each step when mReformDofSet is true.
void Clear() override
{
mpMomentumStrategy->Clear();
mpPressureStrategy->Clear();
}
///@}
///@name Access
///@{
/// Set the verbosity of this strategy and propagate it to the auxiliary
/// momentum and pressure strategies, which report one level less verbosely
/// (never below zero).
void SetEchoLevel(int Level) override
{
    BaseType::SetEchoLevel(Level);
    int StrategyLevel = 0;
    if (Level > 0)
    {
        StrategyLevel = Level - 1;
    }
    mpMomentumStrategy->SetEchoLevel(StrategyLevel);
    mpPressureStrategy->SetEchoLevel(StrategyLevel);
}
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
/// Return a short identifying string for this strategy.
std::string Info() const override
{
    return std::string("TwoStepVPStrategy");
}
/// Print information about this object.
/// Print a short identifying string for this object to the given stream.
void PrintInfo(std::ostream &rOStream) const override
{
rOStream << "TwoStepVPStrategy";
}
/// Print object's data.
/// Print object data. Intentionally empty: this strategy has no
/// user-relevant internal data to report.
void PrintData(std::ostream &rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected Life Cycle
///@{
///@}
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Calculate the coefficients for time iteration.
/**
* @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables.
*/
/// Compute the BDF coefficients for the current (possibly variable) time
/// step size and store them in BDF_COEFFICIENTS.
/// mTimeOrder == 2: variable-step BDF2 (3 coefficients);
/// mTimeOrder == 1: backward Euler (2 coefficients).
void SetTimeCoefficients(ProcessInfo &rCurrentProcessInfo)
{
KRATOS_TRY;
if (mTimeOrder == 2)
{
//calculate the variable-step BDF2 coefficients
double Dt = rCurrentProcessInfo[DELTA_TIME];
double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME];
// Rho is the ratio of the previous step size to the current one
// (Rho == 1 recovers the constant-step 3/2, -2, 1/2 over Dt).
double Rho = OldDt / Dt;
double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho);
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(3, false);
BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant)
BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant)
BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant)
}
else if (mTimeOrder == 1)
{
double Dt = rCurrentProcessInfo[DELTA_TIME];
double TimeCoeff = 1.0 / Dt;
Vector &BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS];
BDFcoeffs.resize(2, false);
BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt)
BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt)
}
KRATOS_CATCH("");
}
/// Solve one momentum (velocity) iteration.
/// @param it            Current nonlinear iteration index (0-based).
/// @param maxIt         Iteration budget for this step.
/// @param fixedTimeStep [out] set by FixTimeStepMomentum on the last iteration.
/// @param velocityNorm  [in/out] reference velocity norm, computed at it==0
///                      and reused for the relative error on later iterations.
/// @return true if the momentum equations converged.
bool SolveMomentumIteration(unsigned int it, unsigned int maxIt, bool &fixedTimeStep, double &velocityNorm)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedMomentum = false;
double NormDv = 0;
fixedTimeStep = false;
// build momentum system and solve for fractional step velocity increment
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 1);
if (it == 0)
{
mpMomentumStrategy->InitializeSolutionStep();
}
NormDv = mpMomentumStrategy->Solve();
if (BaseType::GetEchoLevel() > 1 && Rank == 0)
std::cout << "-------------- s o l v e d ! ------------------" << std::endl;
if (it == 0)
{
velocityNorm = this->ComputeVelocityNorm();
}
// NOTE(review): if the initial velocity field is identically zero,
// velocityNorm is 0 and this division yields inf/NaN — confirm whether
// ComputeVelocityNorm guards against that.
double DvErrorNorm = NormDv / velocityNorm;
unsigned int iterationForCheck = 2;
// Check convergence
if (it == maxIt - 1)
{
KRATOS_INFO("Iteration") << it << " Final Velocity error: " << DvErrorNorm << std::endl;
ConvergedMomentum = this->FixTimeStepMomentum(DvErrorNorm, fixedTimeStep);
}
else if (it > iterationForCheck)
{
KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
ConvergedMomentum = this->CheckMomentumConvergence(DvErrorNorm, fixedTimeStep);
}
else
{
// Early iterations: only report, never declare convergence.
KRATOS_INFO("Iteration") << it << " Velocity error: " << DvErrorNorm << std::endl;
}
if (!ConvergedMomentum && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Momentum equations did not reach the convergence tolerance." << std::endl;
return ConvergedMomentum;
}
/// Solve one continuity (pressure) iteration.
/// @param it    Current nonlinear iteration index (0-based).
/// @param maxIt Iteration budget for this step.
/// @param NormP [in/out] reference pressure norm, computed at it==0 and
///              reused for the relative error on later iterations.
/// @return true if the continuity equation converged.
bool SolveContinuityIteration(unsigned int it, unsigned int maxIt, double &NormP)
{
ModelPart &rModelPart = BaseType::GetModelPart();
int Rank = rModelPart.GetCommunicator().MyPID();
bool ConvergedContinuity = false;
bool fixedTimeStep = false;
double NormDp = 0;
// 2. Pressure solution
rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP, 5);
if (it == 0)
{
mpPressureStrategy->InitializeSolutionStep();
}
NormDp = mpPressureStrategy->Solve();
if (BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "The norm of pressure is: " << NormDp << std::endl;
if (it == 0)
{
NormP = this->ComputePressureNorm();
}
// NOTE(review): NormP == 0 (e.g. zero initial pressure field) would make
// this division ill-defined — confirm ComputePressureNorm's behavior.
double DpErrorNorm = NormDp / (NormP);
// Check convergence
if (it == (maxIt - 1))
{
KRATOS_INFO("Iteration") << it << " Final Pressure error: " << DpErrorNorm << std::endl;
ConvergedContinuity = this->FixTimeStepContinuity(DpErrorNorm, fixedTimeStep);
}
else
{
KRATOS_INFO("Iteration") << it << " Pressure error: " << DpErrorNorm << std::endl;
ConvergedContinuity = this->CheckContinuityConvergence(DpErrorNorm, fixedTimeStep);
}
if (!ConvergedContinuity && BaseType::GetEchoLevel() > 0 && Rank == 0)
std::cout << "Continuity equation did not reach the convergence tolerance." << std::endl;
return ConvergedContinuity;
}
void ComputeErrorL2Norm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
const double currentTime = rCurrentProcessInfo[TIME];
const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
long double sumErrorL2Velocity = 0;
long double sumErrorL2VelocityX = 0;
long double sumErrorL2VelocityY = 0;
long double sumErrorL2Pressure = 0;
long double sumErrorL2TauXX = 0;
long double sumErrorL2TauYY = 0;
long double sumErrorL2TauXY = 0;
#pragma omp parallel
{
ModelPart::ElementIterator ElemBegin;
ModelPart::ElementIterator ElemEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
{
Element::GeometryType &geometry = itElem->GetGeometry();
long double nodalArea = 0;
if (dimension == 2)
{
nodalArea = geometry.Area() / 3.0;
}
else if (dimension == 3)
{
nodalArea = geometry.Volume() * 0.25;
}
long double bariPosX = 0;
long double bariPosY = 0;
long double eleErrorL2Velocity = 0;
long double eleErrorL2VelocityX = 0;
long double eleErrorL2VelocityY = 0;
long double eleErrorL2Pressure = 0;
//ShapeFunctionDerivativesArrayType DN_DX;
Matrix NContainer;
NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
const Vector &N = row(NContainer, 0);
const unsigned int NumNodes = geometry.size();
double elementalPressure = N[0] * geometry(0)->FastGetSolutionStepValue(PRESSURE);
double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
;
for (unsigned int i = 1; i < NumNodes; i++)
{
elementalPressure += N[i] * geometry(i)->FastGetSolutionStepValue(PRESSURE);
elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
}
for (unsigned int i = 0; i < geometry.size(); i++)
{
const long double nodalPosX = geometry(i)->X();
const long double nodalPosY = geometry(i)->Y();
bariPosX += nodalPosX / 3.0;
bariPosY += nodalPosY / 3.0;
}
const long double posX = bariPosX;
const long double posY = bariPosY;
long double expectedVelocityX = pow(posX, 2) * (1.0 - posX) * (1.0 - posX) * (2.0 * posY - 6.0 * pow(posY, 2) + 4.0 * pow(posY, 3));
long double expectedVelocityY = -pow(posY, 2) * (1.0 - posY) * (1.0 - posY) * (2.0 * posX - 6.0 * pow(posX, 2) + 4.0 * pow(posX, 3));
long double expectedPressure = -posX * (1.0 - posX);
eleErrorL2VelocityX = elementalVelocityX - expectedVelocityX;
eleErrorL2VelocityY = elementalVelocityY - expectedVelocityY;
eleErrorL2Pressure = elementalPressure - expectedPressure;
sumErrorL2VelocityX += pow(eleErrorL2VelocityX, 2) * geometry.Area();
sumErrorL2VelocityY += pow(eleErrorL2VelocityY, 2) * geometry.Area();
sumErrorL2Pressure += pow(eleErrorL2Pressure, 2) * geometry.Area();
const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
long double expectedTauXX = 2.0 * (-4.0 * (1.0 - bariPosX) * bariPosX * (-1.0 + 2.0 * bariPosX) * bariPosY * (1.0 - 3.0 * bariPosY + 2.0 * pow(bariPosY, 2)));
long double expectedTauYY = 2.0 * (4.0 * bariPosX * (1.0 - 3.0 * bariPosX + 2.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * bariPosY * (-1.0 + 2.0 * bariPosY));
long double expectedTauXY = (2.0 * (1.0 - 6.0 * bariPosY + 6.0 * pow(bariPosY, 2)) * (1.0 - bariPosX) * (1.0 - bariPosX) * pow(bariPosX, 2) - 2.0 * (1.0 - 6.0 * bariPosX + 6.0 * pow(bariPosX, 2)) * (1.0 - bariPosY) * (1 - bariPosY) * pow(bariPosY, 2));
long double nodalErrorTauXX = tauXX - expectedTauXX;
long double nodalErrorTauYY = tauYY - expectedTauYY;
long double nodalErrorTauXY = tauXY - expectedTauXY;
sumErrorL2TauXX += pow(nodalErrorTauXX, 2) * geometry.Area();
sumErrorL2TauYY += pow(nodalErrorTauYY, 2) * geometry.Area();
sumErrorL2TauXY += pow(nodalErrorTauXY, 2) * geometry.Area();
}
}
long double errorL2Velocity = sqrt(sumErrorL2Velocity);
long double errorL2VelocityX = sqrt(sumErrorL2VelocityX);
long double errorL2VelocityY = sqrt(sumErrorL2VelocityY);
long double errorL2Pressure = sqrt(sumErrorL2Pressure);
long double errorL2TauXX = sqrt(sumErrorL2TauXX);
long double errorL2TauYY = sqrt(sumErrorL2TauYY);
long double errorL2TauXY = sqrt(sumErrorL2TauXY);
std::ofstream myfileVelocity;
myfileVelocity.open("errorL2VelocityFile.txt", std::ios::app);
myfileVelocity << currentTime << "\t" << errorL2Velocity << "\n";
myfileVelocity.close();
std::ofstream myfileVelocityX;
myfileVelocityX.open("errorL2VelocityXFile.txt", std::ios::app);
myfileVelocityX << currentTime << "\t" << errorL2VelocityX << "\n";
myfileVelocityX.close();
std::ofstream myfileVelocityY;
myfileVelocityY.open("errorL2VelocityYFile.txt", std::ios::app);
myfileVelocityY << currentTime << "\t" << errorL2VelocityY << "\n";
myfileVelocityY.close();
std::ofstream myfilePressure;
myfilePressure.open("errorL2PressureFile.txt", std::ios::app);
myfilePressure << currentTime << "\t" << errorL2Pressure << "\n";
myfilePressure.close();
std::ofstream myfileTauXX;
myfileTauXX.open("errorL2TauXXFile.txt", std::ios::app);
myfileTauXX << currentTime << "\t" << errorL2TauXX << "\n";
myfileTauXX.close();
std::ofstream myfileTauYY;
myfileTauYY.open("errorL2TauYYFile.txt", std::ios::app);
myfileTauYY << currentTime << "\t" << errorL2TauYY << "\n";
myfileTauYY.close();
std::ofstream myfileTauXY;
myfileTauXY.open("errorL2TauXYFile.txt", std::ios::app);
myfileTauXY << currentTime << "\t" << errorL2TauXY << "\n";
myfileTauXY.close();
}
// Computes the L2-norm error of the velocity magnitude and of the tangential
// deviatoric stress against the analytical circular-Couette solution, and
// appends "time <tab> errVelTheta <tab> errTauTheta" to errorL2Poiseuille.txt.
// NOTE(review): the barycenter weights (1/3 per node) assume 3-node triangle
// elements — confirm before running this check on 3D meshes.
void ComputeErrorL2NormCasePoiseuille()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    const ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const double currentTime = rCurrentProcessInfo[TIME];

    double sumErrorL2VelocityTheta = 0;
    double sumErrorL2TauTheta = 0;

    // Parameters of the analytical solution: inner/outer radius, radius
    // ratio, angular velocity of the inner cylinder, dynamic viscosity.
    double r_in = 0.2;
    double R_out = 0.5;
    double kappa = r_in / R_out;
    double omega = 0.5;
    double viscosity = 100.0;

    // FIX: the two accumulators are shared between threads; the original
    // "#pragma omp parallel" had no reduction clause, so the "+=" below was
    // a data race. Declare them as OpenMP sum reductions (matching the
    // sibling ComputeVelocityNorm/ComputePressureNorm functions).
#pragma omp parallel reduction(+ : sumErrorL2VelocityTheta, sumErrorL2TauTheta)
    {
        ModelPart::ElementIterator ElemBegin;
        ModelPart::ElementIterator ElemEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Elements(), ElemBegin, ElemEnd);
        for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem)
        {
            Element::GeometryType &geometry = itElem->GetGeometry();

            // Interpolate the velocity at the first Gauss point.
            // (FIX: removed the unused locals dimension/nodalArea/eleErrorL2*
            // and the unused elementalPressure interpolation.)
            Matrix NContainer = geometry.ShapeFunctionsValues(GeometryData::GI_GAUSS_1);
            const Vector &N = row(NContainer, 0);
            const unsigned int NumNodes = geometry.size();
            double elementalVelocityX = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_X);
            double elementalVelocityY = N[0] * geometry(0)->FastGetSolutionStepValue(VELOCITY_Y);
            for (unsigned int i = 1; i < NumNodes; i++)
            {
                elementalVelocityX += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_X);
                elementalVelocityY += N[i] * geometry(i)->FastGetSolutionStepValue(VELOCITY_Y);
            }

            // Element barycenter (1/3 weight per node: triangle assumption).
            long double bariPosX = 0;
            long double bariPosY = 0;
            for (unsigned int i = 0; i < NumNodes; i++)
            {
                bariPosX += geometry(i)->X() / 3.0;
                bariPosY += geometry(i)->Y() / 3.0;
            }
            const long double posX = bariPosX;
            const long double posY = bariPosY;

            // Polar decomposition of the barycenter position.
            const double rPos = sqrt(pow(posX, 2) + pow(posY, 2));
            const double cosalfa = posX / rPos;
            const double sinalfa = posY / rPos;
            const double sin2alfa = 2.0 * cosalfa * sinalfa;
            const double cos2alfa = 1.0 - 2.0 * pow(sinalfa, 2);

            // Analytical tangential velocity of the Couette flow at rPos.
            double expectedVelocityTheta = pow(kappa, 2) * omega * R_out / (1.0 - pow(kappa, 2)) * (R_out / rPos - rPos / R_out);
            double computedVelocityTheta = sqrt(pow(elementalVelocityX, 2) + pow(elementalVelocityY, 2));
            double nodalErrorVelocityTheta = computedVelocityTheta - expectedVelocityTheta;

            // Deviatoric stresses are currently hard-wired to zero (the
            // elemental-variable reads are commented out upstream).
            const long double tauXX = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XX);
            const long double tauYY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_YY);
            const long double tauXY = 0; // itElem->GetValue(ELEMENTAL_DEVIATORIC_STRESS_XY);
            double expectedTauTheta = (2.0 * viscosity * pow(kappa, 2) * omega * pow(R_out, 2)) / (1.0 - pow(kappa, 2)) / pow(rPos, 2);
            double computedTauTheta = (tauXX - tauYY) * sin2alfa / 2.0 - tauXY * cos2alfa;
            double nodalErrorTauTheta = computedTauTheta - expectedTauTheta;

            // Accumulate the squared errors weighted by the element area.
            sumErrorL2VelocityTheta += pow(nodalErrorVelocityTheta, 2) * geometry.Area();
            sumErrorL2TauTheta += pow(nodalErrorTauTheta, 2) * geometry.Area();
        }
    }

    double errorL2VelocityTheta = sqrt(sumErrorL2VelocityTheta);
    double errorL2TauTheta = sqrt(sumErrorL2TauTheta);

    std::ofstream myfileVelocity;
    myfileVelocity.open("errorL2Poiseuille.txt", std::ios::app);
    myfileVelocity << currentTime << "\t" << errorL2VelocityTheta << "\t" << errorL2TauTheta << "\n";
    myfileVelocity.close();
}
// Returns the global L2 norm of the nodal VELOCITY field, summed across MPI
// partitions. A zero norm is clamped to 1.0 so callers can divide by the
// result safely.
double ComputeVelocityNorm()
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    double NormV = 0.00;
#pragma omp parallel reduction(+ \
                               : NormV)
    {
        ModelPart::NodeIterator NodeBegin;
        ModelPart::NodeIterator NodeEnd;
        OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
        for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
        {
            const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
            // FIX: dropped the unused per-node accumulator (NormVelNode).
            for (unsigned int d = 0; d < 3; ++d)
            {
                NormV += Vel[d] * Vel[d];
            }
        }
    }
    // Accumulate the partial sums over all MPI ranks before the square root.
    BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
    NormV = sqrt(NormV);
    if (NormV == 0.0)
        NormV = 1.00;
    return NormV;
}
bool CheckVelocityConvergence(const double NormDv, double &errorNormDv)
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormV = 0.00;
errorNormDv = 0;
#pragma omp parallel reduction(+ \
: NormV)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const array_1d<double, 3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY);
double NormVelNode = 0;
for (unsigned int d = 0; d < 3; ++d)
{
NormVelNode += Vel[d] * Vel[d];
NormV += Vel[d] * Vel[d];
}
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormV);
NormV = sqrt(NormV);
if (NormV == 0.0)
NormV = 1.00;
errorNormDv = NormDv / NormV;
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << "The norm of velocity increment is: " << NormDv << std::endl;
std::cout << "The norm of velocity is: " << NormV << std::endl;
std::cout << "Velocity error: " << errorNormDv << "mVelocityTolerance: " << mVelocityTolerance << std::endl;
}
/* else{ */
/* std::cout<<"Velocity error: "<< errorNormDv <<" velTol: " << mVelocityTolerance<< std::endl; */
/* } */
if (errorNormDv < mVelocityTolerance)
{
return true;
}
else
{
return false;
}
}
bool CheckPressureConvergence(const double NormDp, double &errorNormDp, double &NormP)
{
ModelPart &rModelPart = BaseType::GetModelPart();
NormP = 0.00;
errorNormDp = 0;
#pragma omp parallel reduction(+ \
: NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0)
NormP = 1.00;
errorNormDp = NormDp / (NormP);
if (BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)
{
std::cout << " The norm of pressure increment is: " << NormDp << std::endl;
std::cout << " The norm of pressure is: " << NormP << std::endl;
std::cout << " Pressure error: " << errorNormDp << std::endl;
}
/* else{ */
/* std::cout<<" Pressure error: "<<errorNormDp <<" presTol: "<<mPressureTolerance << std::endl; */
/* } */
if (errorNormDp < mPressureTolerance)
{
return true;
}
else
return false;
}
double ComputePressureNorm()
{
ModelPart &rModelPart = BaseType::GetModelPart();
double NormP = 0.00;
#pragma omp parallel reduction(+ \
: NormP)
{
ModelPart::NodeIterator NodeBegin;
ModelPart::NodeIterator NodeEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
{
const double Pr = itNode->FastGetSolutionStepValue(PRESSURE);
NormP += Pr * Pr;
}
}
BaseType::GetModelPart().GetCommunicator().GetDataCommunicator().SumAll(NormP);
NormP = sqrt(NormP);
if (NormP == 0.0)
NormP = 1.00;
return NormP;
}
// Inspects the momentum-step error norm and decides whether the step must be
// repeated. On clear divergence (error above the tolerance, or NaN) the
// BAD_VELOCITY_CONVERGENCE flag is raised and, for large errors, every node
// is rolled back to the previous step's velocity/pressure/acceleration and
// fixedTimeStep is set. Returns true only when DvErrorNorm is below
// mVelocityTolerance on the well-behaved path.
bool FixTimeStepMomentum(const double DvErrorNorm, bool &fixedTimeStep)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.005;
    bool converged = false;
    // During the first few steps the solution is still settling: accept
    // almost any error.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // FIX: removed "(DvErrorNorm < 0 && DvErrorNorm > 0)", which can never be
    // true and was dead code. "DvErrorNorm != DvErrorNorm" is the classic
    // NaN self-comparison test (kept as-is to avoid new includes).
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << "NOT GOOD CONVERGENCE!!! I'll reduce the next time interval" << DvErrorNorm << std::endl;
        minTolerance = 0.05;
        if (DvErrorNorm > minTolerance)
        {
            std::cout << "BAD CONVERGENCE!!! I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << DvErrorNorm << std::endl;
            fixedTimeStep = true;
            // Roll every node back to the previous time step's fields.
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
            converged = true;
        }
    }
    return converged;
}
// Checks convergence of the momentum iteration inside the iterative loop.
// When the error norm is clearly diverging (above ~1, or NaN) the previous
// velocity/pressure/acceleration fields are restored and fixedTimeStep is
// set; otherwise the bad-convergence flag is cleared. Returns true only when
// DvErrorNorm is below mVelocityTolerance on the well-behaved path.
bool CheckMomentumConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.99999;
    bool converged = false;
    // FIX: removed the always-false "(DvErrorNorm < 0 && DvErrorNorm > 0)"
    // clause (dead code). "DvErrorNorm != DvErrorNorm" is the NaN test.
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
        std::cout << " BAD CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
        std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
        fixedTimeStep = true;
        // Roll every node back to the previous time step's fields.
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin;
            ModelPart::NodeIterator NodeEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
            for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
            {
                itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
            }
        }
    }
    else
    {
        rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, false);
        if (DvErrorNorm < mVelocityTolerance)
        {
            converged = true;
        }
    }
    return converged;
}
// Checks the continuity (pressure) step error and decides whether to repeat
// the time step. On divergence (error above minTolerance, or NaN)
// fixedTimeStep is set and, for errors above 0.9999, the nodal fields are
// rolled back to the previous step. Returns true when DvErrorNorm is below
// mPressureTolerance. BAD_PRESSURE_CONVERGENCE is always cleared at the end
// (preserving the original behavior).
bool FixTimeStepContinuity(const double DvErrorNorm, bool &fixedTimeStep)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    double currentTime = rCurrentProcessInfo[TIME];
    double timeInterval = rCurrentProcessInfo[DELTA_TIME];
    double minTolerance = 0.01;
    bool converged = false;
    // During the first few steps the solution is still settling: accept
    // almost any error.
    if (currentTime < 10 * timeInterval)
    {
        minTolerance = 10;
    }
    // FIX: removed the always-false "(DvErrorNorm < 0 && DvErrorNorm > 0)"
    // clause (dead code). "DvErrorNorm != DvErrorNorm" is the NaN test.
    if ((DvErrorNorm > minTolerance || (DvErrorNorm != DvErrorNorm)) &&
        DvErrorNorm != 0 &&
        (DvErrorNorm != 1 || currentTime > timeInterval))
    {
        fixedTimeStep = true;
        if (DvErrorNorm > 0.9999)
        {
            rCurrentProcessInfo.SetValue(BAD_VELOCITY_CONVERGENCE, true);
            // FIX: the message claimed "higher than 0.1" but the condition
            // just above tests against 0.9999.
            std::cout << " BAD PRESSURE CONVERGENCE DETECTED DURING THE ITERATIVE LOOP!!! error: " << DvErrorNorm << " higher than 0.9999" << std::endl;
            std::cout << " I GO AHEAD WITH THE PREVIOUS VELOCITY AND PRESSURE FIELDS" << std::endl;
            // FIX: removed the redundant second "fixedTimeStep = true;"
            // (already set above).
            // Roll every node back to the previous time step's fields.
#pragma omp parallel
            {
                ModelPart::NodeIterator NodeBegin;
                ModelPart::NodeIterator NodeEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodeBegin, NodeEnd);
                for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode)
                {
                    itNode->FastGetSolutionStepValue(VELOCITY, 0) = itNode->FastGetSolutionStepValue(VELOCITY, 1);
                    itNode->FastGetSolutionStepValue(PRESSURE, 0) = itNode->FastGetSolutionStepValue(PRESSURE, 1);
                    itNode->FastGetSolutionStepValue(ACCELERATION, 0) = itNode->FastGetSolutionStepValue(ACCELERATION, 1);
                }
            }
        }
    }
    else if (DvErrorNorm < mPressureTolerance)
    {
        converged = true;
        fixedTimeStep = false;
    }
    rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    return converged;
}
// Reports whether the continuity (pressure) iteration has converged against
// mPressureTolerance, clearing fixedTimeStep on convergence. Also clears the
// BAD_PRESSURE_CONVERGENCE flag unconditionally.
bool CheckContinuityConvergence(const double DvErrorNorm, bool &fixedTimeStep)
{
    ModelPart &rModelPart = BaseType::GetModelPart();
    ProcessInfo &rCurrentProcessInfo = rModelPart.GetProcessInfo();
    const bool converged = DvErrorNorm < mPressureTolerance;
    if (converged)
    {
        fixedTimeStep = false;
    }
    rCurrentProcessInfo.SetValue(BAD_PRESSURE_CONVERGENCE, false);
    return converged;
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
double mVelocityTolerance;
double mPressureTolerance;
unsigned int mMaxPressureIter;
unsigned int mDomainSize;
unsigned int mTimeOrder;
bool mReformDofSet;
// Fractional step index.
/* 1 : Momentum step (calculate fractional step velocity)
* 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
* 4 : Pressure step
* 5 : Computation of projections
* 6 : End of step velocity
*/
// unsigned int mStepId;
/// Scheme for the solution of the momentum equation
StrategyPointerType mpMomentumStrategy;
/// Scheme for the solution of the mass equation
StrategyPointerType mpPressureStrategy;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Configures the strategy from the solver settings: reads the time order,
/// domain size, DOF-set policy and echo level, then binds the momentum and
/// pressure sub-strategies together with their tolerances. Throws when
/// either sub-strategy is missing from the configuration.
virtual void InitializeStrategy(SolverSettingsType &rSolverConfig)
{
    KRATOS_TRY;
    mTimeOrder = rSolverConfig.GetTimeOrder();
    mDomainSize = rSolverConfig.GetDomainSize();
    mReformDofSet = rSolverConfig.GetReformDofSet();
    BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());
    // Initialize strategies for each step.
    bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity, mpMomentumStrategy);
    if (HaveVelStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Velocity, mVelocityTolerance);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Velocity strategy defined in FractionalStepSettings", "");
    }
    bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure, mpPressureStrategy);
    if (HavePressStrategy)
    {
        rSolverConfig.FindTolerance(SolverSettingsType::Pressure, mPressureTolerance);
        rSolverConfig.FindMaxIter(SolverSettingsType::Pressure, mMaxPressureIter);
    }
    else
    {
        KRATOS_THROW_ERROR(std::runtime_error, "TwoStepVPStrategy error: No Pressure strategy defined in FractionalStepSettings", "");
    }
    // FIX: Check() was previously called twice — once before the members
    // above were even assigned. A single validation after full configuration
    // suffices.
    this->Check();
    KRATOS_CATCH("");
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator (declared private to forbid copying).
// FIX: the operator previously fell off the end of a non-void function
// without returning — undefined behavior if it is ever invoked. It now
// returns *this; copying remains deliberately a no-op.
TwoStepVPStrategy &operator=(TwoStepVPStrategy const &rOther)
{
    (void)rOther; // copying is intentionally not supported
    return *this;
}
/// Copy constructor (declared private to forbid copying).
TwoStepVPStrategy(TwoStepVPStrategy const &rOther) {}
///@}
}; /// Class TwoStepVPStrategy
///@}
///@name Type Definitions
///@{
///@}
///@} // addtogroup
} // namespace Kratos.
#endif // KRATOS_TWO_STEP_V_P_STRATEGY_H
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// Max pooling with a 2x2 kernel and stride 2 (no padding): each output pixel
// is the maximum of a non-overlapping 2x2 patch of the input. top_blob is
// assumed to be pre-sized to outw x outh x inch by the caller — TODO confirm.
// On ARM the inner loop handles 4 output pixels (8 input columns) per
// iteration: intrinsics on aarch64, inline asm on 32-bit ARM; a scalar tail
// loop covers the remaining columns.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // Channels are independent: parallelize across them.
    #pragma omp parallel for
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);
        // r0/r1 walk the pair of input rows that feed one output row.
        const float* r0 = img0;
        const float* r1 = img0 + w;
        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            // nn = number of 4-output vector iterations; remain = scalar tail.
            int nn = outw >> 2;
            int remain = outw - (nn << 2);
#else
            int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
            for (; nn>0; nn--)
            {
                // Load 8 floats from each of the two rows, take the vertical
                // (row-vs-row) max, then a pairwise horizontal max to reduce
                // each 2x2 patch to one value -> 4 outputs per iteration.
                float32x4_t _r00 = vld1q_f32(r0);
                float32x4_t _r10 = vld1q_f32(r1);
                float32x4_t _r01 = vld1q_f32(r0 + 4);
                float32x4_t _r11 = vld1q_f32(r1 + 4);
                float32x4_t _max0 = vmaxq_f32(_r00, _r10);
                float32x4_t _max1 = vmaxq_f32(_r01, _r11);
                float32x4_t _max = vpmaxq_f32(_max0, _max1);
                vst1q_f32(outptr, _max);
                r0 += 8;
                r1 += 8;
                outptr += 4;
            }
#else
            if (nn > 0)
            {
            // 32-bit ARM path: same scheme as the aarch64 intrinsics above —
            // vertical vmax of the two rows, then pairwise vpmax, storing 4
            // outputs per iteration. %0..%3 are nn, r0, r1, outptr.
            asm volatile(
                "0:                             \n"
                "pld        [%1, #256]          \n"
                "pld        [%2, #256]          \n"
                "vld1.f32   {d0-d3}, [%1]!      \n"
                "vld1.f32   {d4-d7}, [%2]!      \n"
                "vmax.f32   q0, q0, q2          \n"
                "vmax.f32   q1, q1, q3          \n"
                "vpmax.f32  d4, d0, d1          \n"
                "vpmax.f32  d5, d2, d3          \n"
                "subs       %0, #1              \n"
                "vst1.f32   {d4-d5}, [%3]!      \n"
                "bne        0b                  \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(outptr)  // %3
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // Scalar tail: one 2x2 patch per iteration.
            for (; remain>0; remain--)
            {
                float max0 = std::max(r0[0], r0[1]);
                float max1 = std::max(r1[0], r1[1]);
                *outptr = std::max(max0, max1);
                r0 += 2;
                r1 += 2;
                outptr++;
            }
            // The inner loop advanced r0/r1 by one row (2*outw == w for even
            // w); skip one more row so the next iteration reads the next
            // row pair.
            r0 += w;
            r1 += w;
        }
    }
}
|
GB_subref_slice.c | //------------------------------------------------------------------------------
// GB_subref_slice: construct coarse/fine tasks for C = A(I,J)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Determine the tasks for computing C=A(I,J). The matrix C has Cnvec vectors,
// and these are divided into coarse and fine tasks. A coarse task will
// compute one or more whole vectors of C. A fine task operates on a slice of
// a single vector of C. The slice can be done by the # of entries in the
// corresponding vector of A, or by the list of indices I, depending on how the
// work is done for that method.
// The (kC)th vector will access A(imin:imax,kA) in Ai,Ax [pA:pA_end-1], where
// pA = Ap_start [kC] and pA_end = Ap_end [kC].
// The computation of each vector C(:,kC) = A(I,kA) is done using one of 12
// different cases, depending on the vector, as determined by GB_subref_method.
// Not all vectors in C are computed using the same method.
// Note that J can have duplicates. kC is unique (0:Cnvec-1) but the
// corresponding vector kA in A may repeat, if J has duplicates. Duplicates in
// J are not exploited, since the coarse/fine tasks are constructed by slicing
// the list of vectors Ch of size Cnvec, not the vectors of A.
// Compare this function with GB_ewise_slice, which constructs coarse/fine
// tasks for the eWise operations (C=A+B, C=A.*B, and C<M>=Z).
// Free only the temporary workspace (the coarse-task partition and the
// per-vector cumulative work counts); the outputs are left intact.
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Coarse, int64_t) ;         \
    GB_FREE_WORK (&Cwork, Cwork_size) ;     \
}
// Free everything, including the outputs (the task list and the I-inverse
// buffers Mark/Inext); used on the out-of-memory / error paths.
#define GB_FREE_ALL                         \
{                                           \
    GB_FREE_WORKSPACE ;                     \
    GB_FREE_WORK (&TaskList, TaskList_size) ; \
    GB_FREE_WORK (&Mark, Mark_size) ;       \
    GB_FREE_WORK (&Inext, Inext_size) ;     \
}
#include "GB_subref.h"
GrB_Info GB_subref_slice // phase 1 of GB_subref
(
// output:
GB_task_struct **p_TaskList, // array of structs
size_t *p_TaskList_size, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads for subref operation
bool *p_post_sort, // true if a final post-sort is needed
int64_t *restrict *p_Mark, // for I inverse, if needed; size avlen
size_t *p_Mark_size,
int64_t *restrict *p_Inext, // for I inverse, if needed; size nI
size_t *p_Inext_size,
int64_t *p_nduplicates, // # of duplicates, if I inverse computed
// from phase0:
const int64_t *restrict Ap_start, // location of A(imin:imax,kA)
const int64_t *restrict Ap_end,
const int64_t Cnvec, // # of vectors of C
const bool need_qsort, // true if C must be sorted
const int Ikind, // GB_ALL, GB_RANGE, GB_STRIDE or GB_LIST
const int64_t nI, // length of I
const int64_t Icolon [3], // for GB_RANGE and GB_STRIDE
// original input:
const int64_t avlen, // A->vlen
const int64_t anz, // nnz (A)
const GrB_Index *I,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_TaskList != NULL) ;
ASSERT (p_TaskList_size != NULL) ;
ASSERT (p_ntasks != NULL) ;
ASSERT (p_nthreads != NULL) ;
ASSERT (p_post_sort != NULL) ;
ASSERT (p_Mark != NULL) ;
ASSERT (p_Inext != NULL) ;
ASSERT (p_nduplicates != NULL) ;
ASSERT ((Cnvec > 0) == (Ap_start != NULL)) ;
ASSERT ((Cnvec > 0) == (Ap_end != NULL)) ;
(*p_TaskList) = NULL ;
(*p_TaskList_size) = 0 ;
(*p_Mark ) = NULL ;
(*p_Inext ) = NULL ;
int64_t *restrict Mark = NULL ; size_t Mark_size = 0 ;
int64_t *restrict Inext = NULL ; size_t Inext_size = 0 ;
int64_t *restrict Cwork = NULL ; size_t Cwork_size = 0 ;
GB_WERK_DECLARE (Coarse, int64_t) ; // size ntasks1+1
int ntasks1 = 0 ;
GrB_Info info ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate the initial TaskList
//--------------------------------------------------------------------------
// Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow
// later, if needed. Usually, 64*nthreads_max is enough, but in a few cases
// fine tasks can cause this number to be exceeded. If that occurs,
// TaskList is reallocated.
// When the mask is present, it is often fastest to break the work up
// into tasks, even when nthreads_max is 1.
GB_task_struct *restrict TaskList = NULL ; size_t TaskList_size = 0 ;
int max_ntasks = 0 ;
int ntasks0 = (nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
GB_REALLOC_TASK_WORK (TaskList, ntasks0, max_ntasks) ;
//--------------------------------------------------------------------------
// determine if I_inverse can be constructed
//--------------------------------------------------------------------------
// I_inverse_ok is true if I might be inverted. If false, then I will not
// be inverted. I can be inverted only if the workspace for the inverse
// does not exceed nnz(A). Note that if I was provided on input as an
// explicit list, but consists of a contiguous range imin:imax, then Ikind
// is now GB_LIST and the list I is ignored.
// If I_inverse_ok is true, the inverse of I might still not be needed.
// need_I_inverse becomes true if any C(:,kC) = A (I,kA) computation
// requires I inverse.
int64_t I_inverse_limit = GB_IMAX (4096, anz) ;
bool I_inverse_ok = (Ikind == GB_LIST &&
((nI > avlen / 256) || ((nI + avlen) < I_inverse_limit))) ;
bool need_I_inverse = false ;
bool post_sort = false ;
int64_t iinc = Icolon [GxB_INC] ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
Cwork = GB_MALLOC_WORK (Cnvec+1, int64_t, &Cwork_size) ;
if (Cwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// estimate the work required for each vector of C
//--------------------------------------------------------------------------
int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int64_t kC ;
#pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static) \
reduction(||:need_I_inverse)
for (kC = 0 ; kC < Cnvec ; kC++)
{
// jC is the (kC)th vector of C = A(I,J)
// int64_t jC = GBH (Ch, kC) ;
// C(:,kC) = A(I,kA) will be constructed
int64_t pA = Ap_start [kC] ;
int64_t pA_end = Ap_end [kC] ;
int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j))
int64_t work ; // amount of work for C(:,kC) = A (I,kA)
bool this_needs_I_inverse ; // true if this vector needs I inverse
// ndupl in I not yet known; it is found when I is inverted. For
// now, assume I has no duplicate entries. All that is needed for now
// is the work required for each C(:,kC), and whether or not I inverse
// must be created. The # of duplicates has no impact on the I inverse
// decision, and a minor effect on the work (which is ignored).
GB_subref_method (&work, &this_needs_I_inverse, alen, avlen,
Ikind, nI, I_inverse_ok, need_qsort, iinc, 0) ;
// log the result
need_I_inverse = need_I_inverse || this_needs_I_inverse ;
Cwork [kC] = work ;
}
//--------------------------------------------------------------------------
// replace Cwork with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork, Context) ;
double cwork = (double) Cwork [Cnvec] ;
//--------------------------------------------------------------------------
// determine # of threads and tasks to use for C=A(I,J)
//--------------------------------------------------------------------------
int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
ntasks1 = (nthreads == 1) ? 1 : (32 * nthreads) ;
double target_task_size = cwork / (double) (ntasks1) ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
//--------------------------------------------------------------------------
// invert I if required
//--------------------------------------------------------------------------
int64_t ndupl = 0 ;
if (need_I_inverse)
{
GB_OK (GB_I_inverse (I, nI, avlen, &Mark, &Mark_size,
&Inext, &Inext_size, &ndupl, Context)) ;
ASSERT (Mark != NULL) ;
ASSERT (Inext != NULL) ;
}
//--------------------------------------------------------------------------
// check for quick return for a single task
//--------------------------------------------------------------------------
if (Cnvec == 0 || ntasks1 == 1)
{
// construct a single coarse task that computes all of C
TaskList [0].kfirst = 0 ;
TaskList [0].klast = Cnvec-1 ;
// free workspace and return result
GB_FREE_WORKSPACE ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = TaskList_size ;
(*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
(*p_nthreads ) = 1 ;
(*p_post_sort ) = false ;
(*p_Mark ) = Mark ;
(*p_Mark_size ) = Mark_size ;
(*p_Inext ) = Inext ;
(*p_Inext_size ) = Inext_size ;
(*p_nduplicates) = ndupl ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// slice the work into coarse tasks
//--------------------------------------------------------------------------
GB_WERK_PUSH (Coarse, ntasks1 + 1, int64_t) ;
if (Coarse == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_pslice (Coarse, Cwork, Cnvec, ntasks1, false) ;
//--------------------------------------------------------------------------
// construct all tasks, both coarse and fine
//--------------------------------------------------------------------------
int ntasks = 0 ;
for (int t = 0 ; t < ntasks1 ; t++)
{
//----------------------------------------------------------------------
// coarse task computes C (:,k:klast)
//----------------------------------------------------------------------
int64_t k = Coarse [t] ;
int64_t klast = Coarse [t+1] - 1 ;
if (k >= Cnvec)
{
//------------------------------------------------------------------
// all tasks have been constructed
//------------------------------------------------------------------
break ;
}
else if (k < klast)
{
//------------------------------------------------------------------
// coarse task has 2 or more vectors
//------------------------------------------------------------------
// This is a non-empty coarse-grain task that does two or more
// entire vectors of C, vectors k:klast, inclusive.
GB_REALLOC_TASK_WORK (TaskList, ntasks + 1, max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = klast ;
ntasks++ ;
}
else
{
//------------------------------------------------------------------
// coarse task has 0 or 1 vectors
//------------------------------------------------------------------
// As a coarse-grain task, this task is empty or does a single
// vector, k. Vector k must be removed from the work done by this
// and any other coarse-grain task, and split into one or more
// fine-grain tasks.
for (int tt = t ; tt < ntasks1 ; tt++)
{
// remove k from the initial slice tt
if (Coarse [tt] == k)
{
// remove k from task tt
Coarse [tt] = k+1 ;
}
else
{
// break, k not in task tt
break ;
}
}
//------------------------------------------------------------------
// determine the # of fine-grain tasks to create for vector k
//------------------------------------------------------------------
double ckwork = Cwork [k+1] - Cwork [k] ;
int nfine = ckwork / target_task_size ;
nfine = GB_IMAX (nfine, 1) ;
// make the TaskList bigger, if needed
GB_REALLOC_TASK_WORK (TaskList, ntasks + nfine, max_ntasks) ;
//------------------------------------------------------------------
// create the fine-grain tasks
//------------------------------------------------------------------
if (nfine == 1)
{
//--------------------------------------------------------------
// this is a single coarse task for all of vector k
//--------------------------------------------------------------
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = k ;
ntasks++ ;
}
else
{
//--------------------------------------------------------------
// slice vector k into nfine fine tasks
//--------------------------------------------------------------
// There are two kinds of fine tasks, depending on the method
// used to compute C(:,kC) = A(I,kA). If the method iterates
// across all entries in A(imin:imax,kA), then those entries
// are sliced (of size alen). Three methods (1, 2, and 6)
// iterate across all entries in I instead (of size nI).
int64_t pA = Ap_start [k] ;
int64_t pA_end = Ap_end [k] ;
int64_t alen = pA_end - pA ; // nnz (A (imin:imax,j))
int method = GB_subref_method (NULL, NULL, alen, avlen,
Ikind, nI, I_inverse_ok, need_qsort, iinc, ndupl) ;
if (method == 10)
{
// multiple fine tasks operate on a single vector C(:,kC)
// using method 10, and so a post-sort is needed.
post_sort = true ;
}
if (method == 1 || method == 2 || method == 6)
{
// slice I for this task
nfine = GB_IMIN (nfine, nI) ;
nfine = GB_IMAX (nfine, 1) ;
for (int tfine = 0 ; tfine < nfine ; tfine++)
{
// flag this as a fine task, and record the method.
// Methods 1, 2, and 6 slice I, not A(:,kA)
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -method ;
// do not partition A(:,kA)
TaskList [ntasks].pA = pA ;
TaskList [ntasks].pA_end = pA_end ;
// partition I for this task
GB_PARTITION (TaskList [ntasks].pB,
TaskList [ntasks].pB_end, nI, tfine, nfine) ;
// unused
TaskList [ntasks].pM = -1 ;
TaskList [ntasks].pM_end = -1 ;
// no post sort
TaskList [ntasks].len = 0 ;
ntasks++ ;
}
}
else
{
// slice A(:,kA) for this task
nfine = GB_IMIN (nfine, alen) ;
nfine = GB_IMAX (nfine, 1) ;
bool reverse = (method == 8 || method == 9) ;
for (int tfine = 0 ; tfine < nfine ; tfine++)
{
// flag this as a fine task, and record the method.
// These methods slice A(:,kA). Methods 8 and 9
// must do so in reverse order.
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -method ;
// partition the items for this task
GB_PARTITION (TaskList [ntasks].pA,
TaskList [ntasks].pA_end, alen,
(reverse) ? (nfine-tfine-1) : tfine, nfine) ;
TaskList [ntasks].pA += pA ;
TaskList [ntasks].pA_end += pA ;
// do not partition I
TaskList [ntasks].pB = 0 ;
TaskList [ntasks].pB_end = nI ;
// unused
TaskList [ntasks].pM = -1 ;
TaskList [ntasks].pM_end = -1 ;
// flag the task that does the post sort
TaskList [ntasks].len = (tfine == 0 && method == 10) ;
ntasks++ ;
}
}
}
}
}
ASSERT (ntasks > 0) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = TaskList_size ;
(*p_ntasks ) = ntasks ;
(*p_nthreads ) = nthreads ;
(*p_post_sort ) = post_sort ;
(*p_Mark ) = Mark ;
(*p_Mark_size ) = Mark_size ;
(*p_Inext ) = Inext ;
(*p_Inext_size ) = Inext_size ;
(*p_nduplicates) = ndupl ;
return (GrB_SUCCESS) ;
}
|
ejercicio_04.c |
/* Ejercicio 4
* Usando la API(OpenMP) hacer un programa que realice lo siguiente:
* - Crear 2 matrices de 50 columnas x 50 filas (50x50), inicializada con valores aleatorios. [✔]
* - Generar las Matrices Transpuestas(MT) de cada una. [✔]
* - Realizar la suma de ambas MT en una 3ra Matriz "R". [✔]
* - Obtener la suma de cada renglón de la Matriz R y guardarlo en un vector. [✔]
* - Devolver el máximo valor del vector anterior y el renglón. [✔]
*/
// Librerias
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Definiciones
#define CHUNKSIZE 10
#define N 10
#define NRA 5 // numero de filas en matriz A
#define NCA 5 // numero de columnas en matriz A
#define NRB 5 // numero de filas en matriz B
#define NCB 5 // numero de columnas en matriz B
#define NRR 5 // numero de filas en matriz R
#define NCR 5 // numero de columnas en matriz R
// Metodo para obtener numeros random
// Returns a pseudo-random integer in the inclusive range [li, ls].
// Uses rand(), so the sequence is reproducible for a fixed srand() seed.
long Random(long li, long ls)
{
    long span = ls - li + 1;
    return li + rand() % span;
}
// Ejecucion main
int main (int argc, char *argv[]) {
    double mA[NRA][NCA];   // matrix A, random values
    double mAT[NCA][NRA];  // transpose of A
    double mB[NRB][NCB];   // matrix B, random values
    double mBT[NCB][NRB];  // transpose of B
    double mR[NRR][NCR];   // R = AT + BT
    double h[NRR];         // row sums of R
    // Set the number of threads for the next parallel regions.
    omp_set_num_threads(2);
    // Fill A and B serially: rand() is not required to be thread-safe, so
    // calling Random() from inside the parallel region was a data race.
    for (int i = 0; i < NRA; i++) {
        for (int j = 0; j < NCA; j++) {
            mA[i][j] = Random(1, 20);
            mB[i][j] = Random(1, 20);
        }
    }
    // Build the transposes and their sum R in parallel.  Loop indices are
    // declared inside the loops so each thread has private copies; the
    // original shared 'j' was a data race under OpenMP.
    #pragma omp parallel for schedule(static,10)
    for (int i = 0; i < NRA; i++) {
        for (int j = 0; j < NCA; j++) {
            mAT[j][i] = mA[i][j];
            mBT[j][i] = mB[i][j];
            mR[j][i] = mAT[j][i] + mBT[j][i];
        }
    }
    // Print matrix A
    printf("******************************************************\n");
    printf("Matriz A:\n");
    for (int i = 0; i < NRA; i++) {
        for (int j = 0; j < NCA; j++) {
            printf("%6.2f  ", mA[i][j]);
        }
        printf("\n");
    }
    // Print transpose of A
    printf("******************************************************\n");
    printf("Matriz Transpuesta de A:\n");
    for (int i = 0; i < NCA; i++) {
        for (int j = 0; j < NRA; j++) {
            printf("%6.2f  ", mAT[i][j]);
        }
        printf("\n");
    }
    // Print matrix B
    printf("******************************************************\n");
    printf("Matriz B:\n");
    for (int i = 0; i < NRB; i++) {
        for (int j = 0; j < NCB; j++) {
            printf("%6.2f  ", mB[i][j]);
        }
        printf("\n");
    }
    // Print transpose of B
    printf("******************************************************\n");
    printf("Matriz Transpuesta de B:\n");
    for (int i = 0; i < NCB; i++) {
        for (int j = 0; j < NRB; j++) {
            printf("%6.2f  ", mBT[i][j]);
        }
        printf("\n");
    }
    // Print R = AT + BT
    printf("******************************************************\n");
    printf("Matriz R: 'Sumatoria de Matrices Transpuestas'\n");
    for (int i = 0; i < NRR; i++) {
        for (int j = 0; j < NCR; j++) {
            printf("%6.2f  ", mR[i][j]);
        }
        printf("\n");
    }
    // Row sums of R into vector h, tracking the row with the maximum sum.
    // 'sum' is initialized each row (it was read uninitialized before), and
    // the sums are taken from mR itself, not from the partially-built mAT.
    int maxRow = 0;
    for (int i = 0; i < NRR; i++) {
        double sum = 0.0;
        for (int j = 0; j < NCR; j++) {
            sum += mR[i][j];
        }
        h[i] = sum;
        if (h[i] > h[maxRow]) {
            maxRow = i;
        }
    }
    // Print the vector of row sums and the maximum value with its row.
    printf("******************************************************\n");
    printf("Vector de sumas por renglon:\n");
    for (int i = 0; i < NRR; i++) {
        printf(" h[%d] = %6.2f\n", i + 1, h[i]);
    }
    printf("El maximo valor es %6.2f en el renglon %d\n", h[maxRow], maxRow + 1);
    return 0;
}
|
GB_concat_sparse.c | //------------------------------------------------------------------------------
// GB_concat_sparse: concatenate an array of matrices into a sparse matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// GB_FREE_WORKSPACE: free all temporary transposed/duplicated tiles S [k],
// the S pointer array itself, the Work counter array, and the ek-slicing
// workspace.  The helpers tolerate NULL pointers, so this is safe to invoke
// whether or not each piece was allocated.
#define GB_FREE_WORKSPACE \
if (S != NULL) \
{ \
for (int64_t k = 0 ; k < m * n ; k++) \
{ \
GB_Matrix_free (&(S [k])) ; \
} \
} \
GB_FREE_WORK (&S, S_size) ; \
GB_FREE_WORK (&Work, Work_size) ; \
GB_WERK_POP (A_ek_slicing, int64_t) ;
// GB_FREE_ALL: free the workspace and also the output matrix C; used on the
// error paths, where C must not be returned half-built.
#define GB_FREE_ALL \
{ \
GB_FREE_WORKSPACE ; \
GB_phbix_free (C) ; \
}
#include "GB_concat.h"
GrB_Info GB_concat_sparse // concatenate into a sparse matrix
(
GrB_Matrix C, // input/output matrix for results
const bool C_iso, // if true, construct C as iso
const GB_void *cscalar, // iso value of C, if C is iso
const int64_t cnz, // # of entries in C
const GrB_Matrix *Tiles, // 2D row-major array of size m-by-n,
const GrB_Index m,
const GrB_Index n,
const int64_t *restrict Tile_rows, // size m+1
const int64_t *restrict Tile_cols, // size n+1
GB_Context Context
)
{
//--------------------------------------------------------------------------
// allocate C as a sparse matrix
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Matrix A = NULL ;
ASSERT_MATRIX_OK (C, "C input to concat sparse", GB0) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
int64_t *Work = NULL ;
size_t Work_size = 0 ;
GrB_Matrix *S = NULL ;
size_t S_size = 0 ;
// save C's type/dimension/format settings before freeing its old content,
// so the new sparse C can be rebuilt with the same characteristics
GrB_Type ctype = C->type ;
int64_t cvlen = C->vlen ;
int64_t cvdim = C->vdim ;
bool csc = C->is_csc ;
size_t csize = ctype->size ;
GB_Type_code ccode = ctype->code ;
float hyper_switch = C->hyper_switch ;
float bitmap_switch = C->bitmap_switch ;
int sparsity_control = C->sparsity_control ;
bool static_header = C->static_header ;
GB_phbix_free (C) ;
// set C->iso = C_iso OK
GB_OK (GB_new_bix (&C, static_header, // prior static or dynamic header
ctype, cvlen, cvdim, GB_Ap_malloc, csc, GxB_SPARSE, false,
hyper_switch, cvdim, cnz, true, C_iso, Context)) ;
C->bitmap_switch = bitmap_switch ;
C->sparsity_control = sparsity_control ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ci = C->i ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
if (C_iso)
{
// an iso matrix stores its single value once, in C->x [0]
memcpy (C->x, cscalar, csize) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// "outer" walks the tile dimension aligned with C's vectors, "inner" the
// other one; Work has one int64 per (inner tile row, vector of C) pair and
// holds entry counts first, then (after the cumsum below) start offsets.
int64_t nouter = csc ? n : m ;
int64_t ninner = csc ? m : n ;
Work = GB_CALLOC_WORK (ninner * cvdim, int64_t, &Work_size) ;
S = GB_CALLOC_WORK (m * n, GrB_Matrix, &S_size) ;
if (S == NULL || Work == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// count entries in each vector of each tile
//--------------------------------------------------------------------------
for (int64_t outer = 0 ; outer < nouter ; outer++)
{
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
//------------------------------------------------------------------
// get the tile A; transpose and typecast, if needed
//------------------------------------------------------------------
A = csc ? GB_TILE (Tiles, inner, outer)
: GB_TILE (Tiles, outer, inner) ;
GrB_Matrix T = NULL ;
ASSERT_MATRIX_OK (A, "A tile for concat sparse", GB0) ;
if (csc != A->is_csc)
{
// T = (ctype) A', not in-place, using a dynamic header
GB_OK (GB_new (&T, false, // auto sparsity, new header
A->type, A->vdim, A->vlen, GB_Ap_null, csc,
GxB_AUTO_SPARSITY, -1, 1, Context)) ;
// save T in array S so it can be reused in the copy pass below
// and freed by GB_FREE_WORKSPACE
if (csc)
{
GB_TILE (S, inner, outer) = T ;
}
else
{
GB_TILE (S, outer, inner) = T ;
}
GB_OK (GB_transpose_cast (T, ctype, csc, A, false, Context)) ;
A = T ;
GB_MATRIX_WAIT (A) ;
ASSERT_MATRIX_OK (A, "T=A' for concat sparse", GB0) ;
}
ASSERT (C->is_csc == A->is_csc) ;
ASSERT (!GB_ANY_PENDING_WORK (A)) ;
//------------------------------------------------------------------
// ensure the tile is not bitmap
//------------------------------------------------------------------
if (GB_IS_BITMAP (A))
{
if (T == NULL)
{
// copy A into T
// set T->iso = A->iso OK: no burble needed
GB_OK (GB_dup_worker (&T, A->iso, A, true, NULL, Context)) ;
// save T in array S
if (csc)
{
GB_TILE (S, inner, outer) = T ;
}
else
{
GB_TILE (S, outer, inner) = T ;
}
ASSERT_MATRIX_OK (T, "T=dup(A) for concat sparse", GB0) ;
}
// convert T from bitmap to sparse
GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ;
ASSERT_MATRIX_OK (T, "T bitmap to sparse, concat sparse", GB0) ;
A = T ;
}
ASSERT (!GB_IS_BITMAP (A)) ;
//------------------------------------------------------------------
// log the # of entries in each vector of the tile A
//------------------------------------------------------------------
const int64_t anvec = A->nvec ;
const int64_t avlen = A->vlen ;
// cvstart = first vector of C covered by this tile column/row
int64_t cvstart = csc ? Tile_cols [outer] : Tile_rows [outer] ;
int64_t *restrict W = Work + inner * cvdim + cvstart ;
int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
if (GB_IS_FULL (A))
{
// A is full
int64_t j ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (j = 0 ; j < anvec ; j++)
{
// W [j] = # of entries in A(:,j), which is just avlen
W [j] = avlen ;
}
}
else
{
// A is sparse or hyper
int64_t k ;
int64_t *restrict Ah = A->h ;
int64_t *restrict Ap = A->p ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = 0 ; k < anvec ; k++)
{
// W [j] = # of entries in A(:,j), the kth column of A
int64_t j = GBH (Ah, k) ;
W [j] = Ap [k+1] - Ap [k] ;
}
}
}
}
//--------------------------------------------------------------------------
// cumulative sum of entries in each tile
//--------------------------------------------------------------------------
// First pass: within each vector k, replace each tile's count by the
// running total of counts of the tiles above it, and put the vector's
// total in Cp [k].
int nth = GB_nthreads (ninner*cvdim, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = 0 ; k < cvdim ; k++)
{
int64_t s = 0 ;
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
int64_t p = inner * cvdim + k ;
int64_t c = Work [p] ;
Work [p] = s ;
s += c ;
}
// total number of entries in C(:,k)
Cp [k] = s ;
}
// Cp becomes the usual sparse column-pointer array of C
GB_cumsum (Cp, cvdim, &(C->nvec_nonempty), nthreads_max, Context) ;
// Second pass: offset each tile's running total by Cp [k], so Work [p]
// is now the absolute position in C->i/C->x where that tile's part of
// vector k begins.
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = 0 ; k < cvdim ; k++)
{
int64_t pC = Cp [k] ;
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
int64_t p = inner * cvdim + k ;
Work [p] += pC ;
}
}
//--------------------------------------------------------------------------
// concatenate all matrices into C
//--------------------------------------------------------------------------
for (int64_t outer = 0 ; outer < nouter ; outer++)
{
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
//------------------------------------------------------------------
// get the tile A, either the temporary matrix T or the original A
//------------------------------------------------------------------
A = csc ? GB_TILE (S, inner, outer)
: GB_TILE (S, outer, inner) ;
if (A == NULL)
{
A = csc ? GB_TILE (Tiles, inner, outer)
: GB_TILE (Tiles, outer, inner) ;
}
ASSERT_MATRIX_OK (A, "A tile again, concat sparse", GB0) ;
ASSERT (!GB_IS_BITMAP (A)) ;
ASSERT (C->is_csc == A->is_csc) ;
ASSERT (!GB_ANY_PENDING_WORK (A)) ;
GB_Type_code acode = A->type->code ;
//------------------------------------------------------------------
// determine where to place the tile in C
//------------------------------------------------------------------
// The tile A appears in vectors cvstart:cvend-1 of C, and indices
// cistart:ciend-1.
int64_t cvstart, cvend, cistart, ciend ;
if (csc)
{
// C and A are held by column
// Tiles is row-major and accessed in column order
cvstart = Tile_cols [outer] ;
cvend = Tile_cols [outer+1] ;
cistart = Tile_rows [inner] ;
ciend = Tile_rows [inner+1] ;
}
else
{
// C and A are held by row
// Tiles is row-major and accessed in row order
cvstart = Tile_rows [outer] ;
cvend = Tile_rows [outer+1] ;
cistart = Tile_cols [inner] ;
ciend = Tile_cols [inner+1] ;
}
// get the workspace pointer array W for this tile
int64_t *restrict W = Work + inner * cvdim + cvstart ;
//------------------------------------------------------------------
// slice the tile
//------------------------------------------------------------------
int64_t avdim = cvend - cvstart ;
int64_t avlen = ciend - cistart ;
ASSERT (avdim == A->vdim) ;
ASSERT (avlen == A->vlen) ;
int A_nthreads, A_ntasks ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const bool A_iso = A->iso ;
GB_SLICE_MATRIX (A, 1, chunk) ;
//------------------------------------------------------------------
// copy the tile A into C
//------------------------------------------------------------------
// NOTE(review): 'done' is presumably set by the included
// GB_concat_sparse_template.c when a specialized case runs, so
// the generic typecasting path below is skipped — confirm against
// the template file.
bool done = false ;
if (C_iso)
{
//--------------------------------------------------------------
// C and A are iso
//--------------------------------------------------------------
#define GB_ISO_CONCAT
#define GB_COPY(pC,pA,A_iso) ;
#include "GB_concat_sparse_template.c"
}
else
{
#ifndef GBCOMPACT
if (ccode == acode)
{
// no typecasting needed
switch (csize)
{
#undef GB_COPY
#define GB_COPY(pC,pA,A_iso) \
Cx [pC] = GBX (Ax, pA, A_iso) ;
case GB_1BYTE : // uint8, int8, bool, or 1-byte user
#define GB_CTYPE uint8_t
#include "GB_concat_sparse_template.c"
break ;
case GB_2BYTE : // uint16, int16, or 2-byte user
#define GB_CTYPE uint16_t
#include "GB_concat_sparse_template.c"
break ;
case GB_4BYTE : // uint32, int32, float, or 4-byte user
#define GB_CTYPE uint32_t
#include "GB_concat_sparse_template.c"
break ;
case GB_8BYTE : // uint64, int64, double, float complex,
// or 8-byte user defined
#define GB_CTYPE uint64_t
#include "GB_concat_sparse_template.c"
break ;
case GB_16BYTE : // double complex or 16-byte user
#define GB_CTYPE GB_blob16
#include "GB_concat_sparse_template.c"
break ;
default:;
}
}
#endif
}
if (!done)
{
// with typecasting or user-defined types
GB_cast_function cast_A_to_C = GB_cast_factory (ccode, acode) ;
size_t asize = A->type->size ;
#define GB_CTYPE GB_void
#undef GB_COPY
#define GB_COPY(pC,pA,A_iso) \
cast_A_to_C (Cx + (pC)*csize, \
Ax + (A_iso ? 0:(pA)*asize), asize) ;
#include "GB_concat_sparse_template.c"
}
GB_WERK_POP (A_ek_slicing, int64_t) ;
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
C->magic = GB_MAGIC ;
ASSERT_MATRIX_OK (C, "C from concat sparse", GB0) ;
return (GrB_SUCCESS) ;
}
|
simd-2.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
extern void abort ();
__UINTPTR_TYPE__ arr[1027];
__attribute__((noinline, noclone)) void
foo ()
{
int i, v;
#pragma omp simd private (v) safelen(16)
for (i = 0; i < 1027; i++)
arr[i] = (__UINTPTR_TYPE__) &v;
}
int
main ()
{
int i, j, cnt = 0;
__UINTPTR_TYPE__ arr2[16];
foo ();
for (i = 0; i < 1027; i++)
{
for (j = 0; j < cnt; j++)
if (arr[i] == arr2[j])
break;
if (j != cnt)
continue;
if (cnt == 16)
abort ();
arr2[cnt++] = arr[i];
}
return 0;
}
|
pooling_2x2.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 2x2 max pooling with stride 2: each output element is the max of a 2x2
// input window.  Channels are processed in parallel; within a row, groups
// of 4 outputs are handled with NEON intrinsics/asm, the remainder in
// scalar code.
static void pooling2x2s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
// After a row of outputs, r0/r1 have advanced by 2*outw; tailstep skips
// the unconsumed tail of the current input row plus one full input row
// (vertical stride 2).
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const float* img0 = bottom_blob.channel(q);
float* outptr = top_blob.channel(q);
// r0/r1 walk the two input rows feeding one output row
const float* r0 = img0;
const float* r1 = img0 + w;
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
// nn = number of 4-output NEON iterations; remain = leftover outputs
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
// Per iteration: load 8 floats from each row, take the vertical
// max (fmax), then the horizontal pairwise max (fmaxp) to yield
// 4 pooled outputs.
asm volatile(
"0: \n"
"prfm pldl1keep, [%1, #256] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%1], #32 \n"
"ld1 {v2.4s, v3.4s}, [%2], #32 \n"
"fmax v0.4s, v0.4s, v2.4s \n"
"fmax v1.4s, v1.4s, v3.4s \n"
"fmaxp v2.4s, v0.4s, v1.4s \n"
"subs %w0, %w0, #1 \n"
"st1 {v2.4s}, [%3], #16 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
#else
if (nn > 0)
{
// ARMv7 variant of the same scheme: vmax for the vertical max,
// vpmax for the horizontal pairwise max.
asm volatile(
"0: \n"
"pld [%1, #256] \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%1]! \n"
"vld1.f32 {d4-d7}, [%2]! \n"
"vmax.f32 q0, q0, q2 \n"
"vmax.f32 q1, q1, q3 \n"
"vpmax.f32 d4, d0, d1 \n"
"vpmax.f32 d5, d2, d3 \n"
"subs %0, #1 \n"
"vst1.f32 {d4-d5}, [%3]! \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(outptr) // %3
: "0"(nn),
"1"(r0),
"2"(r1),
"3"(outptr)
: "cc", "memory", "q0", "q1", "q2", "q3"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// scalar tail: one 2x2 window per output
for (; remain>0; remain--)
{
float max0 = std::max(r0[0], r0[1]);
float max1 = std::max(r1[0], r1[1]);
*outptr = std::max(max0, max1);
r0 += 2;
r1 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
}
}
}
|
BlockOps.h |
/*****************************************************************************
*
* Copyright (c) 2003-2020 by The University of Queensland
* http://www.uq.edu.au
*
* Primary Business: Queensland, Australia
* Licensed under the Apache License, version 2.0
* http://www.apache.org/licenses/LICENSE-2.0
*
* Development until 2012 by Earth Systems Science Computational Center (ESSCC)
* Development 2012-2013 by School of Earth Sciences
* Development from 2014-2017 by Centre for Geoscience Computing (GeoComp)
* Development from 2019 by School of Earth and Environmental Sciences
**
*****************************************************************************/
#ifndef __PASO_BLOCKOPS_H__
#define __PASO_BLOCKOPS_H__
#include "Paso.h"
#include "PasoException.h"
#include <cstring> // memcpy
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
#include <mkl_lapack.h>
#include <mkl_cblas.h>
#else
extern "C" {
#include <clapack.h>
#include <cblas.h>
}
#endif
#endif
namespace paso {
/// copies N doubles from V into R (the regions must not overlap).
inline void BlockOps_Cpy_N(dim_t N, double* R, const double* V)
{
    // memcpy takes (void*, const void*); the old (void*)V cast needlessly
    // discarded V's const qualifier.
    memcpy(R, V, N*sizeof(double));
}
/// performs operation R = R - mat*V for a 2x2 block (V and R must not
/// overlap).  mat is column-major: [A11 A21 A12 A22].
inline void BlockOps_SMV_2(double* R, const double* mat, const double* V)
{
    const double x0 = V[0];
    const double x1 = V[1];
    R[0] -= mat[0] * x0 + mat[2] * x1;
    R[1] -= mat[1] * x0 + mat[3] * x1;
}
/// performs operation R = R - mat*V for a 3x3 block (V and R must not
/// overlap).  mat is column-major: entry (i,j) is mat[i + 3*j].
inline void BlockOps_SMV_3(double* R, const double* mat, const double* V)
{
    const double x0 = V[0];
    const double x1 = V[1];
    const double x2 = V[2];
    for (int i = 0; i < 3; i++) {
        R[i] -= mat[i] * x0 + mat[i + 3] * x1 + mat[i + 6] * x2;
    }
}
#define PASO_MISSING_CLAPACK throw PasoException("You need to install a LAPACK version to enable operations on block sizes > 3.")
/// performs operation R=R-mat*V (V and R are not overlapping) - NxN
/// mat is a column-major N-by-N matrix; the work is delegated to BLAS
/// dgemv with alpha=-1, beta=1 so R accumulates R - mat*V.  Without a
/// LAPACK/BLAS build this operation is unavailable and throws.
inline void BlockOps_SMV_N(dim_t N, double* R, const double* mat, const double* V)
{
#ifdef ESYS_HAVE_LAPACK
cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, -1., mat, N, V, 1, 1., R, 1);
#else
PASO_MISSING_CLAPACK;
#endif
}
/// performs R = mat*V for a column-major N-by-N matrix (R is overwritten:
/// dgemv is called with alpha=1, beta=0).  Without a LAPACK/BLAS build this
/// operation is unavailable and throws.
inline void BlockOps_MV_N(dim_t N, double* R, const double* mat, const double* V)
{
#ifdef ESYS_HAVE_LAPACK
cblas_dgemv(CblasColMajor,CblasNoTrans, N, N, 1., mat, N, V, 1, 0., R, 1);
#else
PASO_MISSING_CLAPACK;
#endif
}
/// inverts a column-major 2x2 matrix A into invA using the closed-form
/// adjugate.  Sets *failed = 1 (and leaves invA untouched) if the
/// determinant is exactly zero.
inline void BlockOps_invM_2(double* invA, const double* A, int* failed)
{
    // A = [a b; c d] stored column-major as {a, c, b, d}
    const double det = A[0] * A[3] - A[2] * A[1];
    if (std::abs(det) > 0) {
        const double r = 1. / det;
        invA[0] =  A[3] * r;
        invA[1] = -A[1] * r;
        invA[2] = -A[2] * r;
        invA[3] =  A[0] * r;
    } else {
        *failed = 1;
    }
}
/// inverts a column-major 3x3 matrix A into invA via the cofactor
/// expansion.  Sets *failed = 1 (and leaves invA untouched) if the
/// determinant is exactly zero.
inline void BlockOps_invM_3(double* invA, const double* A, int* failed)
{
    // column-major layout: A = {a11 a21 a31 | a12 a22 a32 | a13 a23 a33}
    const double a11 = A[0], a21 = A[1], a31 = A[2];
    const double a12 = A[3], a22 = A[4], a32 = A[5];
    const double a13 = A[6], a23 = A[7], a33 = A[8];
    // cofactors of the first row, reused for the determinant and column 0
    const double c0 = a22*a33 - a23*a32;
    const double c1 = a31*a23 - a21*a33;
    const double c2 = a21*a32 - a31*a22;
    double det = a11*c0 + a12*c1 + a13*c2;
    if (std::abs(det) > 0) {
        det = 1./det;
        invA[0] = c0*det;
        invA[1] = c1*det;
        invA[2] = c2*det;
        invA[3] = (a13*a32 - a12*a33)*det;
        invA[4] = (a11*a33 - a31*a13)*det;
        invA[5] = (a12*a31 - a11*a32)*det;
        invA[6] = (a12*a23 - a13*a22)*det;
        invA[7] = (a13*a21 - a11*a23)*det;
        invA[8] = (a11*a22 - a12*a21)*det;
    } else {
        *failed = 1;
    }
}
/// LU factorization of NxN matrix mat with partial pivoting
/// mat is overwritten with its LU factors and pivot receives the pivot
/// indices (for the companion BlockOps_solve_N).  Sets *failed = 1 if the
/// factorization reports an error (e.g. a singular matrix).
/// NOTE(review): dgetrf/clapack_dgetrf take int-typed dimensions; this
/// passes dim_t/index_t directly — confirm those typedefs match the LAPACK
/// integer width in this build.
inline void BlockOps_invM_N(dim_t N, double* mat, index_t* pivot, int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
int res = 0;
dgetrf(&N, &N, mat, &N, pivot, &res);
if (res != 0)
*failed = 1;
#else
int res = clapack_dgetrf(CblasColMajor, N, N, mat, N, pivot);
if (res != 0)
*failed = 1;
#endif // ESYS_MKL_LAPACK
#else
PASO_MISSING_CLAPACK;
#endif
}
/// solves system of linear equations A*X=B
/// mat/pivot must hold the LU factors and pivots produced by
/// BlockOps_invM_N; X holds B on entry and the solution on exit (a single
/// right-hand side).  Sets *failed = 1 if the triangular solve reports an
/// error.
inline void BlockOps_solve_N(dim_t N, double* X, double* mat, index_t* pivot, int* failed)
{
#ifdef ESYS_HAVE_LAPACK
#ifdef ESYS_MKL_LAPACK
int res = 0;
int ONE = 1;
dgetrs("N", &N, &ONE, mat, &N, pivot, X, &N, &res);
if (res != 0)
*failed = 1;
#else
int res = clapack_dgetrs(CblasColMajor, CblasNoTrans, N, 1, mat, N, pivot, X, N);
if (res != 0)
*failed = 1;
#endif // ESYS_MKL_LAPACK
#else
PASO_MISSING_CLAPACK;
#endif
}
/// in-place matrix-vector product V = mat*V for a 2x2 block.
/// mat is column-major: [A11 A21 A12 A22].
inline void BlockOps_MViP_2(const double* mat, double* V)
{
    const double x0 = V[0];
    const double x1 = V[1];
    V[0] = mat[0] * x0 + mat[2] * x1;
    V[1] = mat[1] * x0 + mat[3] * x1;
}
/// in-place matrix-vector product V = mat*V for a 3x3 block.
/// mat is column-major: entry (i,j) is mat[i + 3*j].
inline void BlockOps_MViP_3(const double* mat, double* V)
{
    const double x0 = V[0];
    const double x1 = V[1];
    const double x2 = V[2];
    for (int i = 0; i < 3; i++) {
        V[i] = mat[i] * x0 + mat[i + 3] * x1 + mat[i + 6] * x2;
    }
}
/// applies a block-diagonal operator to x in place, one block per entry i:
/// n_block==1 multiplies by the scalars D[i]; n_block 2 or 3 uses the
/// unrolled in-place products; larger blocks use the LAPACK triangular
/// solve with the pre-factored blocks in D and their pivots.
/// NOTE(review): for n_block > 3, 'failed' is written by multiple threads
/// without synchronization.  All writers store the same value (1), but it
/// is formally a data race — consider an OpenMP reduction or atomic write.
inline void BlockOps_solveAll(dim_t n_block, dim_t n, double* D,
index_t* pivot, double* x)
{
if (n_block == 1) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
x[i] *= D[i];
} else if (n_block == 2) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
BlockOps_MViP_2(&D[4*i], &x[2*i]);
} else if (n_block == 3) {
#pragma omp parallel for
for (dim_t i=0; i<n; ++i)
BlockOps_MViP_3(&D[9*i], &x[3*i]);
} else {
int failed = 0;
#pragma omp parallel for
for (dim_t i=0; i<n; ++i) {
const dim_t block_size = n_block*n_block;
BlockOps_solve_N(n_block, &x[n_block*i], &D[block_size*i], &pivot[n_block*i], &failed);
}
if (failed > 0) {
throw PasoException("BlockOps_solveAll: solution failed.");
}
}
}
} // namespace paso
#endif // __PASO_BLOCKOPS_H__
|
indexed.c | #include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "constants.h"
/* Lower bound on timings by a single memcpy
* gives ~1.19 seconds for 10 iterations, 1757 MB /0.11s ~= 1600 MB/s
[root@laptopjisk ~]# dmidecode -t 17
# dmidecode 3.1
Getting SMBIOS data from sysfs.
SMBIOS 3.0.0 present.
Handle 0x0039, DMI type 17, 40 bytes
Memory Device
Array Handle: 0x0038
Error Information Handle: Not Provided
Total Width: 64 bits
Data Width: 64 bits
Size: 8192 MB
Form Factor: Row Of Chips
Set: None
Locator: System Board Memory
Bank Locator: BANK 0
Type: LPDDR3
Type Detail: Synchronous Unbuffered (Unregistered)
Speed: 1867 MT/s
Manufacturer: Micron
Serial Number: 00000000
Asset Tag: 9876543210
Part Number: MT52L1G32D4PG-107
Rank: 2
Configured Clock Speed: 1867 MT/s
Minimum Voltage: 1.25 V
Maximum Voltage: 1.25 V
Configured Voltage: 1.2 V
*/
/**
* Deinterleave (transpose) an IQUV ring buffer page to the ordering needed for FITS files
* Note that this is probably a slow function, and is not meant to be run real-time
*
* data in: tab, channel/4, time/500 packets of time,channel,pn
* data out: tab, channel, pol, time
*
* Suggested use is:
* 1. realtime: ringbuffer -> [trigger] -> dada_dbdisk
* 2. offline: dada_dbdisk -> ringbuffer -> dadafits
*
* @param {const unsigned char *} page Ringbuffer page with interleaved data
* @param {const unsigned char *} transposed Buffer with deinterleaved data
* @param {int} ntabs Number of tabs
* @param {int} nchannels Number of channels
* @param {int} npackets Number of packets per sequence
*/
/**
 * Deinterleave (transpose) an IQUV ring buffer page to the ordering needed
 * for FITS files.  Not meant to be run in real time.
 *
 * data in:  tab, channel/4, time/500 packets of time,channel,pn
 * data out: tab, channel, pol, time
 *
 * @param {const unsigned char *} page        Ringbuffer page with interleaved data
 * @param {unsigned char *}       transposed  Buffer receiving deinterleaved data
 * @param {int} ntabs      Number of tabs
 * @param {int} nchannels  Number of channels
 * @param {int} npackets   Number of packets per sequence
 */
void deinterleave (const unsigned char *page, unsigned char * restrict transposed, const int ntabs, const int nchannels, const int npackets) {
  int packets_processed = 0;
  for (int tab = 0; tab < ntabs; tab++) {
    for (int channel_offset = 0; channel_offset < nchannels; channel_offset += 4) {
      for (int sequence_number = 0; sequence_number < npackets; sequence_number++) {
        // find start of packet, and increase counter
        const unsigned char * restrict packet = &page[packets_processed * NPOLS*NCHANS*NSAMPS];
        packets_processed++;
        // process packet; the inner loop indices cn and pn are declared
        // inside the loops so each OpenMP thread gets private copies —
        // the original shared cn/pn were a data race under 'parallel for'
        int tn;
        #pragma omp parallel for
        for (tn = 0; tn < NSAMPS; tn++) {         // 500 samples per packet
          for (int cn = 0; cn < NCHANS; cn++) {   // 4 channels per packet
            for (int pn = 0; pn < NPOLS; pn++) {  // 4 polarizations per packet
              transposed[
                ((tab * nchannels + cn + channel_offset) * NPOLS + pn) * npackets * NSAMPS +
                tn + sequence_number * NSAMPS
              ] = packet[tn * NCHANS * NPOLS + cn * NPOLS + pn];
            }
          }
        }
      }
    }
  }
}
|
batcher.h | #pragma once
#include <dhb/graph.h>
#include <algorithm>
#include <assert.h>
#include <omp.h>
#include <stdexcept>
#include <thread>
#include <tuple>
namespace dhb {
// Returns the [begin, end) sub-range of the edge batch assigned to thread
// thread_id out of thread_count.  The split starts from an even division of
// the batch and then shifts both boundaries forward so that all edges with
// the same source vertex stay within a single thread's range (the batch is
// assumed sorted by source).  A thread whose adjusted range is empty gets
// {batch_end, batch_end}.
template <typename EdgeIt>
std::tuple<EdgeIt, EdgeIt> thread_batch(EdgeIt batch_begin, EdgeIt batch_end,
unsigned int thread_count, unsigned int thread_id) {
size_t const batch_size = std::distance(batch_begin, batch_end);
size_t const elements = batch_size / thread_count;
// Degenerate cases: fewer edges than threads, or one thread's share is the
// whole batch — give everything to thread 0, nothing to the others.
if (elements == 0 || elements == batch_size) {
bool const first_thread = thread_id == 0;
if (first_thread) {
return {batch_begin, batch_end};
} else {
return {batch_end, batch_end};
}
}
size_t const position = thread_id * elements;
EdgeIt start = std::min(batch_begin + position, batch_end);
bool const last_thread = thread_id == (thread_count - 1);
// The last thread absorbs the remainder of the division.
EdgeIt end = last_thread ? batch_end : std::min(start + elements, batch_end);
if (start != end) {
// Advance 'start' past edges whose source equals that of the edge just
// before the naive boundary: those belong to the previous thread.
Vertex const predecessor =
(start == batch_begin) ? invalidVertex() : std::prev(start, 1)->source;
while (start != end && predecessor == start->source) {
std::advance(start, 1);
}
if (start != end) {
// Symmetrically extend 'end' while the following edges still share the
// source of the last edge in range (and do not belong to the previous
// thread's vertex).
for (Vertex successor = (end == batch_end) ? invalidVertex() : end->source;
end != batch_end && successor == (end - 1)->source && end->source != predecessor;
successor = end->source) {
std::advance(end, 1);
}
}
}
return {start, end};
}
// Distributes a batch of updates across OpenMP threads so that all updates
// with the same key are executed by the same thread, avoiding per-element
// locking. The scatter strategy is selected at compile time via exactly one
// of DHB_SCATTER_SORTING / DHB_SCATTER_DARTS / DHB_SCATTER_TWOPHASE /
// DHB_SCATTER_COUNTING; building without any of them makes the parallel
// paths throw.
template <typename T> class BatchParallelizer {
  public:
    // One-shot: partition [begin, end) by key and run func on every element.
    // `key` maps an element to an unsigned int used for thread assignment.
    // Small batches (n < thread count) are processed sequentially.
    template <typename Iterator, typename K, typename F>
    void operator()(Iterator begin, Iterator end, K key, F func) {
        int const t_count = omp_get_max_threads();
        size_t const n = end - begin;
        if (t_count == 1 || n < t_count) {
            for (auto it = begin; it != end; ++it)
                func(*it);
            return;
        }
#if defined(DHB_SCATTER_SORTING)
        // Sort by source vertex, then give each thread a contiguous range
        // that does not split a source run (see thread_batch()).
        // NOTE(review): this branch partitions on Edge::source directly and
        // ignores the `key` functor — confirm that is intended.
        auto cmp = [](Edge u, Edge v) { return u.source < v.source; };
        std::sort(begin, end, cmp);
#pragma omp parallel shared(begin, end)
        {
            std::tuple<Iterator, Iterator> local_batch =
                thread_batch(begin, end, t_count, omp_get_thread_num());
            for (auto it = std::get<0>(local_batch); it != std::get<1>(local_batch); ++it) {
                func(*it);
            }
        }
#elif defined(DHB_SCATTER_DARTS)
        constexpr auto empty_cell = static_cast<unsigned int>(-1);
        // Number of slots per thread.
        // Slightly larger than the batch size per thread.
        auto s = 1 << (integer_log2_ceil((n + t_count - 1) / t_count) + 2);
        m_batch_slots.resize(s * t_count);
        std::fill(m_batch_slots.begin(), m_batch_slots.end(), empty_cell);
        m_batch_dispatched.resize(n);
        std::fill(m_batch_dispatched.begin(), m_batch_dispatched.end(), 0);
        auto slots = m_batch_slots.data();
        auto dispatched = m_batch_dispatched.data();
        std::atomic<size_t> n_done{0};
#pragma omp parallel num_threads(t_count)
        {
            auto t = omp_get_thread_num();
            assert(omp_get_num_threads() == t_count);
            auto n_per_thread = n / t_count;
            std::minstd_rand prng{m_prng_seed + t};
            std::uniform_int_distribution<int> distrib{0, s - 1};
            int r;
            // Round-based "dart throwing": in each round every thread throws
            // its still-undispatched items into random slots of the owning
            // thread's slot range, then drains its own range and executes
            // what landed there. Slot collisions simply retry in a later
            // round. Terminates once all n items have been dispatched.
            for (r = 1;; ++r) {
                auto i_begin = t * n_per_thread;
                auto i_end = i_begin + n_per_thread;
                if (t == t_count - 1)
                    i_end = n;
                for (size_t i = i_begin; i < i_end; ++i) {
                    if (__atomic_load_n(&dispatched[i], __ATOMIC_RELAXED))
                        continue;
                    auto k = key(*(begin + i));
                    // Owning thread for this key, and a random slot within it.
                    auto d = hash_node(k) % t_count;
                    auto j = distrib(prng);
                    __atomic_store_n(&slots[d * s + j], i, __ATOMIC_RELAXED);
                }
                // Drain my own slot range.
                size_t n_now = 0;
                for (size_t j = t * s; j < (t + 1) * s; ++j) {
                    auto i = __atomic_load_n(&slots[j], __ATOMIC_RELAXED);
                    if (i == empty_cell)
                        continue;
                    if (dispatched[i])
                        continue;
                    func(*(begin + i));
                    __atomic_store_n(&dispatched[i], 1, __ATOMIC_RELAXED);
                    __atomic_store_n(&slots[j], empty_cell, __ATOMIC_RELAXED);
                    ++n_now;
                }
                if (n_now)
                    n_done.fetch_add(n_now, std::memory_order_relaxed);
                if (n_done.load(std::memory_order_relaxed) == n)
                    break;
            }
        }
        // Advance the seed so the next batch throws different darts.
        m_prng_seed += t_count;
#elif defined(DHB_SCATTER_TWOPHASE)
        auto key_to_thread = [](unsigned int k, unsigned int t_count) -> unsigned int {
            auto hash = [](unsigned int x) -> unsigned int {
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = (x >> 16) ^ x;
                return x;
            };
            // First, hash the key to get a value that is scattered evenly in [0, 2^32).
            // For such values, the multiplication + shift yields an almost fair map,
            // see https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/.
            return (static_cast<uint64_t>(hash(k)) * static_cast<uint64_t>(t_count)) >> 32;
        };
        // Per source thread: t_count bucket offsets plus a sentinel end.
        m_batch_counts.resize((t_count + 1) * t_count);
        m_batch_slots.resize(n);
#pragma omp parallel num_threads(t_count)
        {
            auto t = omp_get_thread_num();
            assert(omp_get_num_threads() == t_count);
            auto counts_of_thread = [&](int ct) -> unsigned int* {
                return &m_batch_counts[ct * (t_count + 1)];
            };
            auto n_per_thread = n / t_count;
            auto i_begin = t * n_per_thread;
            auto i_end = i_begin + n_per_thread;
            if (t == t_count - 1)
                i_end = n;
            // First, perform a local counting sort to sort updates according to associated threads.
            auto t_counts = counts_of_thread(t);
            for (int at = 0; at < t_count; ++at)
                t_counts[at] = 0;
            for (size_t i = i_begin; i < i_end; ++i) {
                auto k = key(*(begin + i));
                auto at = key_to_thread(k, t_count);
                ++t_counts[at];
            }
            // Turn counts into exclusive bucket end offsets within [i_begin, i_end).
            unsigned int psum = 0;
            for (int at = 0; at < t_count; ++at) {
                psum += t_counts[at];
                t_counts[at] = i_begin + psum;
            }
            assert(i_begin + psum == i_end);
            t_counts[t_count] = i_end;
            for (size_t irev = i_end; irev > i_begin; --irev) {
                // Iterating in reverse ensures that the sort is stable;
                // this yields a better memory access pattern when performing random access later.
                auto i = irev - 1;
                auto k = key(*(begin + i));
                auto at = key_to_thread(k, t_count);
                m_batch_slots[--t_counts[at]] = i;
            }
            // Now, let each thread collect its updates.
#pragma omp barrier
            unsigned int local_count = 0;
            for (int ot = 0; ot < t_count; ++ot) {
                // Bucket `t` of source thread `ot` holds indices destined for me.
                auto ot_counts = counts_of_thread(ot);
                auto j_begin = ot_counts[t];
                auto j_end = ot_counts[t + 1];
                for (size_t j = j_begin; j < j_end; ++j) {
                    auto i = m_batch_slots[j];
                    auto edge = *(begin + i); // NOTE(review): unused copy
                    func(*(begin + i));
                }
                local_count += j_end - j_begin; // NOTE(review): never read
            }
        }
#else
        throw std::runtime_error("DHB was compiled without support for parallel updates");
#endif
    }

    // Phase 1 of the split API: bin [begin, end) by key so that a subsequent
    // map() call can hand each thread its own elements. Uses
    // omp_get_num_threads() and omp barriers, so it is meant to be called by
    // every thread of an enclosing `#pragma omp parallel` region.
    template <typename Iterator, typename K> void distribute(Iterator begin, Iterator end, K key) {
        int const t_count = omp_get_num_threads();
        size_t const n = end - begin;
        if (t_count == 1 || n < t_count)
            return;
#if defined(DHB_SCATTER_TWOPHASE)
        auto key_to_thread = [](unsigned int k, unsigned int t_count) -> unsigned int {
            auto hash = [](unsigned int x) -> unsigned int {
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = (x >> 16) ^ x;
                return x;
            };
            // First, hash the key to get a value that is scattered evenly in [0, 2^32).
            // For such values, the multiplication + shift yields an almost fair map,
            // see https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/.
            return (static_cast<uint64_t>(hash(k)) * static_cast<uint64_t>(t_count)) >> 32;
        };
#pragma omp single
        {
            m_batch_counts.resize((t_count + 1) * t_count);
            m_batch_slots.resize(n);
        }
        auto t = omp_get_thread_num();
        auto counts_of_thread = [&](int ct) -> unsigned int* {
            return &m_batch_counts[ct * (t_count + 1)];
        };
        auto n_per_thread = n / t_count;
        auto i_begin = t * n_per_thread;
        auto i_end = i_begin + n_per_thread;
        if (t == t_count - 1)
            i_end = n;
        // First, perform a local counting sort to sort updates according to associated threads.
        auto t_counts = counts_of_thread(t);
        for (int at = 0; at < t_count; ++at)
            t_counts[at] = 0;
        for (size_t i = i_begin; i < i_end; ++i) {
            auto k = key(*(begin + i));
            auto at = key_to_thread(k, t_count);
            ++t_counts[at];
        }
        // Exclusive bucket end offsets within this thread's slice.
        unsigned int psum = 0;
        for (int at = 0; at < t_count; ++at) {
            psum += t_counts[at];
            t_counts[at] = i_begin + psum;
        }
        assert(i_begin + psum == i_end);
        t_counts[t_count] = i_end;
        for (size_t irev = i_end; irev > i_begin; --irev) {
            // Iterating in reverse ensures that the sort is stable;
            // this yields a better memory access pattern when performing random access later.
            auto i = irev - 1;
            auto k = key(*(begin + i));
            auto at = key_to_thread(k, t_count);
            m_batch_slots[--t_counts[at]] = i;
        }
        // Now, let each thread collect its updates.
#pragma omp barrier
#elif defined(DHB_SCATTER_COUNTING)
        auto key_to_thread = [](unsigned int k, unsigned int t_count) -> unsigned int {
            auto hash = [](unsigned int x) -> unsigned int {
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = ((x >> 16) ^ x) * 0x45d9f3b;
                x = (x >> 16) ^ x;
                return x;
            };
            // First, hash the key to get a value that is scattered evenly in [0, 2^32).
            // For such values, the multiplication + shift yields an almost fair map,
            // see https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/.
            return (static_cast<uint64_t>(hash(k)) * static_cast<uint64_t>(t_count)) >> 32;
        };
#pragma omp single
        {
            m_batch_counts.resize((t_count + 1) * t_count);
            m_out.resize(t_count);
            m_wp.resize(t_count);
        }
        auto t = omp_get_thread_num();
        auto counts_of_thread = [&](int ct) -> unsigned int* {
            return &m_batch_counts[ct * (t_count + 1)];
        };
        auto n_per_thread = n / t_count;
        ptrdiff_t i_begin = t * n_per_thread;
        ptrdiff_t i_end = i_begin + n_per_thread;
        if (t == t_count - 1)
            i_end = n;
        // First, determine send counts.
        auto t_counts = counts_of_thread(t);
        for (int at = 0; at < t_count; ++at)
            t_counts[at] = 0;
        for (ptrdiff_t i = i_begin; i < i_end; ++i) {
            auto it = begin + i;
            auto k = key(*it);
            auto at = key_to_thread(k, t_count);
            ++t_counts[at];
        }
#pragma omp barrier
        // Do a prefix sum over the send count *to* the current thread.
        unsigned int psum = 0;
        for (int rt = 0; rt < t_count; ++rt) {
            auto rt_counts = counts_of_thread(rt);
            auto c = rt_counts[t];
            rt_counts[t] = psum;
            psum += c;
        }
        m_out[t].resize(psum);
#pragma omp barrier
        // Move the entries around.
        m_wp[t].resize(t_count);
        for (int at = 0; at < t_count; ++at)
            m_wp[t][at] = m_out[at].data() + t_counts[at];
        auto wp_ptr = m_wp[t].data();
        for (ptrdiff_t i = i_begin; i < i_end; ++i) {
            auto it = begin + i;
            auto k = key(*it);
            auto at = key_to_thread(k, t_count);
            *(wp_ptr[at]++) = *it;
        }
#pragma omp barrier
#else
        throw std::runtime_error("DHB was compiled without support for parallel updates");
#endif
    }

    // Phase 2 of the split API: run func over the elements assigned to the
    // calling thread by a preceding distribute() on the same range. Must be
    // called from the same parallel region / team as distribute().
    template <typename Iterator, typename F> void map(Iterator begin, Iterator end, F func) {
        int const t_count = omp_get_num_threads();
        size_t const n = end - begin;
        if (t_count == 1 || n < t_count) {
            // Sequential fallback mirrors distribute()'s early-out.
#pragma omp master
            {
                for (auto it = begin; it != end; ++it) {
                    T elem = *it;
                    func(elem);
                }
            }
            return;
        }
#if defined(DHB_SCATTER_TWOPHASE)
        auto t = omp_get_thread_num();
        auto counts_of_thread = [&](int ct) -> unsigned int* {
            return &m_batch_counts[ct * (t_count + 1)];
        };
        unsigned int local_count = 0;
        for (int ot = 0; ot < t_count; ++ot) {
            // Bucket `t` of source thread `ot` holds indices destined for me.
            auto ot_counts = counts_of_thread(ot);
            auto j_begin = ot_counts[t];
            auto j_end = ot_counts[t + 1];
            for (size_t j = j_begin; j < j_end; ++j) {
                auto i = m_batch_slots[j];
                auto edge = *(begin + i); // NOTE(review): unused copy
                func(*(begin + i));
            }
            local_count += j_end - j_begin; // NOTE(review): never read
        }
#elif defined(DHB_SCATTER_COUNTING)
        // distribute() already materialized my elements in m_out[t].
        auto t = omp_get_thread_num();
        for (auto& elem : m_out[t])
            func(elem);
#else
        throw std::runtime_error("DHB was compiled without support for parallel updates");
#endif
    }

  private:
#if defined(DHB_SCATTER_DARTS)
    unsigned int m_prng_seed = 0;             // advanced per batch for fresh darts
    std::vector<unsigned int> m_batch_slots;  // slot table, s slots per thread
    std::vector<char> m_batch_dispatched;     // 0/1 flag per batch element
#elif defined(DHB_SCATTER_TWOPHASE)
    std::vector<unsigned int> m_batch_counts; // (t_count+1) offsets per thread
    std::vector<unsigned int> m_batch_slots;  // element indices, bucketed by target thread
#elif defined(DHB_SCATTER_COUNTING)
    std::vector<unsigned int> m_batch_counts; // send counts / write offsets
    std::vector<std::vector<T>> m_out;        // per-thread received elements
    std::vector<std::vector<T*>> m_wp;        // per-thread write pointers
#endif
};
} // namespace dhb
|
WjCryptLib_AesCtr.c | ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// WjCryptLib_AesCtr
//
// Implementation of AES CTR stream cipher.
//
// Depends on: CryptoLib_Aes
//
// AES CTR is a stream cipher using the AES block cipher in counter mode.
// This implementation works on both little and big endian architectures.
//
// This is free and unencumbered software released into the public domain - November 2017 waterjuice.org
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// IMPORTS
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#include "WjCryptLib_AesCtr.h"
#include "WjCryptLib_Aes.h"
#include <stdint.h>
#include <memory.h>
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// MACROS
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
#define MIN( x, y ) ( ((x)<(y))?(x):(y) )
#define STORE64H( x, y ) \
{ (y)[0] = (uint8_t)(((x)>>56)&255); (y)[1] = (uint8_t)(((x)>>48)&255); \
(y)[2] = (uint8_t)(((x)>>40)&255); (y)[3] = (uint8_t)(((x)>>32)&255); \
(y)[4] = (uint8_t)(((x)>>24)&255); (y)[5] = (uint8_t)(((x)>>16)&255); \
(y)[6] = (uint8_t)(((x)>>8)&255); (y)[7] = (uint8_t)((x)&255); }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// INTERNAL FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// CreateCurrentCipherBlock
//
// Takes the IV and the counter in the AesCtrContext and produces the cipher block (CurrentCipherBlock). The cipher
// block is produced by first creating a 128 bit block with the IV as first 64 bits and the CurrentCipherBlockIndex
// stored as the remaining 64bits in Network byte order (Big Endian)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static
void
CreateCurrentCipherBlock
    (
        AesCtrContext*      Context                 // [in out]
    )
{
    uint8_t* const block = Context->CurrentCipherBlock;

    // Keystream block layout: [ IV (8 bytes) | counter (8 bytes, big endian) ]
    memcpy( block, Context->IV, AES_CTR_IV_SIZE );
    STORE64H( Context->CurrentCipherBlockIndex, block + AES_CTR_IV_SIZE );

    // Encrypt in place to turn the IV||counter block into keystream.
    AesEncryptInPlace( &Context->Aes, block );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// XorBuffer
//
// Takes two Source buffers and XORs them together and puts the result in DestinationBuffer
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
static
void
XorBuffers
    (
        uint8_t const*      SourceBuffer1,          // [in]
        uint8_t const*      SourceBuffer2,          // [in]
        uint8_t*            DestinationBuffer,      // [out]
        uint32_t            Amount                  // [in]
    )
{
    // Walk all three buffers in lockstep; an Amount of zero is a no-op.
    while( Amount-- != 0 )
    {
        *DestinationBuffer++ = *SourceBuffer1++ ^ *SourceBuffer2++;
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// PUBLIC FUNCTIONS
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrInitialise
//
// Initialises an AesCtrContext with an already initialised AesContext and a IV. This function can quickly be used
// to change the IV without requiring the more length processes of reinitialising an AES key.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
AesCtrInitialise
    (
        AesCtrContext*      Context,                // [out]
        AesContext const*   InitialisedAesContext,  // [in]
        uint8_t const       IV [AES_CTR_IV_SIZE]    // [in]
    )
{
    // Start the stream at position zero within cipher block zero.
    Context->StreamIndex = 0;
    Context->CurrentCipherBlockIndex = 0;

    // Adopt the caller's expanded AES key and the supplied nonce.
    Context->Aes = *InitialisedAesContext;
    memcpy( Context->IV, IV, AES_CTR_IV_SIZE );

    // Prime the keystream with cipher block 0.
    CreateCurrentCipherBlock( Context );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrInitialiseWithKey
//
// Initialises an AesCtrContext with an AES Key and an IV. This combines the initialising an AES Context and then
// running AesCtrInitialise. KeySize must be 16, 24, or 32 (for 128, 192, or 256 bit key size)
// Returns 0 if successful, or -1 if invalid KeySize provided
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int
AesCtrInitialiseWithKey
    (
        AesCtrContext*      Context,                // [out]
        uint8_t const*      Key,                    // [in]
        uint32_t            KeySize,                // [in]
        uint8_t const       IV [AES_CTR_IV_SIZE]    // [in]
    )
{
    AesContext aes;

    // Expand the key schedule; AesInitialise rejects any size other
    // than 16, 24, or 32 bytes.
    if( AesInitialise( &aes, Key, KeySize ) != 0 )
    {
        return -1;
    }

    // Key is good - finish setting up the CTR stream state.
    AesCtrInitialise( Context, &aes, IV );
    return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrSetStreamIndex
//
// Sets the current stream index to any arbitrary position. Setting to 0 sets it to the beginning of the stream. Any
// subsequent output will start from this position
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
AesCtrSetStreamIndex
    (
        AesCtrContext*      Context,                // [in out]
        uint64_t            StreamIndex             // [in]
    )
{
    // Which 16-byte cipher block the requested position falls inside.
    uint64_t const newBlockIndex = StreamIndex / AES_BLOCK_SIZE;

    Context->StreamIndex = StreamIndex;

    // If we are still inside the cached cipher block there is nothing to redo.
    if( newBlockIndex == Context->CurrentCipherBlockIndex )
    {
        return;
    }

    // Position moved into a different block: regenerate the cached keystream.
    Context->CurrentCipherBlockIndex = newBlockIndex;
    CreateCurrentCipherBlock( Context );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrXor
//
// XORs the stream of byte of the AesCtrContext from its current stream position onto the specified buffer. This will
// advance the stream index by that number of bytes.
// Use once over data to encrypt it. Use it a second time over the same data from the same stream position and the
// data will be decrypted.
// InBuffer and OutBuffer can point to the same location for in-place encrypting/decrypting
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
AesCtrXor
    (
        AesCtrContext*      Context,                // [in out]
        void const*         InBuffer,               // [in]
        void*               OutBuffer,              // [out]
        uint32_t            Size                    // [in]
    )
{
    uint32_t        firstChunkSize;
    uint32_t        amountAvailableInBlock;
    int             numIterations;
    int             i;
    uint64_t        loopStartingCipherBlockIndex;
    uint32_t        loopStartingOutputOffset;
    uint8_t         preCipherBlock [AES_BLOCK_SIZE];    // these hold AES blocks, so size them
    uint8_t         encCipherBlock [AES_BLOCK_SIZE];    // with AES_BLOCK_SIZE (was AES_KEY_SIZE_128)
    uint64_t        cipherBlockIndex = 0;

    // First determine how much is available in the current block.
    amountAvailableInBlock = AES_BLOCK_SIZE - (Context->StreamIndex % AES_BLOCK_SIZE);

    // Determine how much of the current block we will take, either all that is available, or less
    // if the amount requested is smaller.
    firstChunkSize = MIN( amountAvailableInBlock, Size );

    // XOR the bytes from the cipher block
    XorBuffers( InBuffer, Context->CurrentCipherBlock + (AES_BLOCK_SIZE - amountAvailableInBlock), OutBuffer, firstChunkSize );

    // Determine how many iterations will be needed for generating cipher blocks.
    // We always have to finish with a non-depleted cipher block, so whenever the request consumes
    // the remainder of the current block a fresh block is generated for the context.
    //
    // BUGFIX: previously this was unconditionally
    //     ((Size - firstChunkSize) + AES_BLOCK_SIZE) / AES_BLOCK_SIZE
    // which evaluates to 1 even when the request stays strictly inside the current block
    // (Size < amountAvailableInBlock, including Size == 0). That advanced
    // CurrentCipherBlockIndex past StreamIndex / AES_BLOCK_SIZE and replaced the cached
    // cipher block, so subsequent calls XORed with the wrong keystream - streaming data
    // in small chunks produced different output than a single large call.
    if( Size < amountAvailableInBlock )
    {
        // Current block is not depleted; no new blocks are needed.
        numIterations = 0;
    }
    else
    {
        numIterations = ( (Size - firstChunkSize) + AES_BLOCK_SIZE ) / AES_BLOCK_SIZE;
    }

    // Calculate the cipher block index and the output offset for when we start the loop.
    // This function may be built with OpenMP and the loop will run in parallel. So we set-up
    // variables that will be common at the start of the loop.
    loopStartingCipherBlockIndex = Context->CurrentCipherBlockIndex + 1;
    loopStartingOutputOffset = firstChunkSize;

    // Copy the IV into the first half of the preCipherBlock. When built for OpenMP preCipherBlock will be copied into
    // a local version within the loop.
    memcpy( preCipherBlock, Context->IV, AES_CTR_IV_SIZE );

    // Now start generating new cipher blocks as required.
#ifdef _OPENMP
    #pragma omp parallel for firstprivate( preCipherBlock, cipherBlockIndex ) lastprivate( encCipherBlock, cipherBlockIndex )
#endif
    for( i=0; i<numIterations; i++ )
    {
        uint32_t outputOffset = loopStartingOutputOffset + (AES_BLOCK_SIZE * i);
        uint32_t amountLeft = Size - outputOffset;
        uint32_t chunkSize = MIN( amountLeft, AES_BLOCK_SIZE );

        // Increment block index and regenerate cipher block
        cipherBlockIndex = loopStartingCipherBlockIndex + i;

        // Now place in the counter in Big Endian form in second half of preCipherBlock
        STORE64H( cipherBlockIndex, preCipherBlock + AES_CTR_IV_SIZE );

        // Perform AES encryption on the preCipherBlock and put result in encCipherBlock
        AesEncrypt( &Context->Aes, preCipherBlock, encCipherBlock );

        // XOR block out onto the buffer. (chunkSize is 0 on a trailing iteration that only
        // pre-generates the next cipher block for the context.)
        XorBuffers( (uint8_t*)InBuffer + outputOffset, encCipherBlock, (uint8_t*)OutBuffer + outputOffset, chunkSize );
    }

    // Update context. The last generated block (if any) becomes the current cipher block,
    // preserving the invariant CurrentCipherBlockIndex == StreamIndex / AES_BLOCK_SIZE.
    Context->StreamIndex += Size;
    if( numIterations > 0 )
    {
        Context->CurrentCipherBlockIndex = cipherBlockIndex;
        memcpy( Context->CurrentCipherBlock, encCipherBlock, AES_BLOCK_SIZE );
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrOutput
//
// Outputs the stream of byte of the AesCtrContext from its current stream position. This will advance the stream
// index by that number of bytes.
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void
AesCtrOutput
    (
        AesCtrContext*      Context,                // [in out]
        void*               Buffer,                 // [out]
        uint32_t            Size                    // [in]
    )
{
    // Zero the buffer first, then XOR the keystream over it in place:
    // 0 ^ keystream == keystream, so Buffer receives raw stream bytes.
    memset( Buffer, 0, Size );
    AesCtrXor( Context, Buffer, Buffer, Size );
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// AesCtrXorWithKey
//
// This function combines AesCtrInitialiseWithKey and AesCtrXor. This is suitable when encrypting/decypting data in
// one go with a key that is not going to be reused.
// This will used the provided Key and IV and generate a stream that is XORed over Buffer.
// InBuffer and OutBuffer can point to the same location for inplace encrypting/decrypting
// Returns 0 if successful, or -1 if invalid KeySize provided
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int
AesCtrXorWithKey
    (
        uint8_t const*      Key,                    // [in]
        uint32_t            KeySize,                // [in]
        uint8_t const       IV [AES_CTR_IV_SIZE],   // [in]
        void const*         InBuffer,               // [in]
        void*               OutBuffer,              // [out]
        uint32_t            BufferSize              // [in]
    )
{
    AesCtrContext   context;

    // One-shot convenience: build a throwaway CTR context and XOR the
    // whole buffer. Fails only if KeySize is not 16, 24, or 32.
    int const status = AesCtrInitialiseWithKey( &context, Key, KeySize, IV );
    if( 0 == status )
    {
        AesCtrXor( &context, InBuffer, OutBuffer, BufferSize );
    }

    return status;
}
|
EmbeddingBag.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#if defined(USE_LIBXSMM_JIT)
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"
template <typename T>
class EmbeddingBagImpl
{
public:
EmbeddingBagImpl(long M, long E) : M(M), E(E)
{
weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment);
#ifdef USE_LIBXSMM_JIT
_ld = E;
if (sizeof(T) == 4) {
kernel = libxsmm_dispatch_meltw_unary(E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX);
#else
} else {
kernel = libxsmm_dispatch_meltw_unary(E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F16, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F16, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX);
}
kernel1 = libxsmm_dispatch_meltw_unary(E, 0, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR);
kernel2 = libxsmm_dispatch_meltw_binary(E, 1, &_ld, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0, LIBXSMM_MELTW_TYPE_BINARY_MULADD);
#endif
}
~EmbeddingBagImpl()
{
my_free(weight_);
weight_ = 0;
}
void init(T low = -0.1, T high = 0.1)
{
init_random(M * E, weight_, low, high);
}
#ifdef USE_LIBXSMM_JIT
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (int n = 0; n < N; n++)
{
libxsmm_meltw_unary_param params;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long __n = end-start;
params.in.primary = weight;
params.in.secondary = &indices[start];
params.in.tertiary = &__n;
params.out.primary = &output[n][0];
kernel( ¶ms );
}
}
#else
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict output)[E] = (T(*)[*])output_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
for (long v = 0; v < E; v++)
output[n][v] = 0;
for (long s = start; s < end; s++)
{
auto ind = indices[s];
#pragma omp simd
for (long v = 0; v < E; v++)
{
output[n][v] += weight[ind][v];
}
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[*])gradout_;
T(*__restrict values)[E] = (T(*)[*])values_;
int _ld = E;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
libxsmm_meltw_unary_param unary_param;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long _N = end-start;
unary_param.in.primary = (void*)&gradout[n][0];
unary_param.out.primary = (void*)&values[start][0];
unary_param.op.primary = (void*)&_N;
kernel1(&unary_param);
}
}
#else
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[*])gradout_;
T(*__restrict values)[E] = (T(*)[*])values_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
for (long s = start; s < end; s++)
{
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
for (long v = 0; v < E; v++)
values[s][v] = gradout[n][v];
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
int use_lock_free = use_rtm == 0 ? 1: 0;
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict grads)[E] = (T(*)[*])grads_;
int _ld = E;
if(use_lock_free) {
/*printf("Using lock free update\n");*/
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
libxsmm_meltw_binary_param binary_param;
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
kernel2(&binary_param);
}
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
libxsmm_meltw_binary_param binary_param;
long ind = indices[i];
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
TransactionScope guard(fallBackLock, 100, 0);
kernel2(&binary_param);
}
}
}
}
#else
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
T(*__restrict weight)[E] = (T(*)[*])weight_;
T(*__restrict grads)[E] = (T(*)[*])grads_;
int use_lock_free = use_rtm == 0 ? 1: 0;
if(use_lock_free) {
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
long ind = indices[i];
{
TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
}
#endif
T *weight_;
long M;
long E;
#ifdef USE_LIBXSMM_JIT
int _ld;
libxsmm_meltwfunction_unary kernel;
libxsmm_meltwfunction_unary kernel1;
libxsmm_meltwfunction_binary kernel2;
#endif
};
|
mpi_cpd.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "../splatt_mpi.h"
#include "../mttkrp.h"
#include "../matrix.h"
#include "../timer.h"
#include "../thd_info.h"
#include "../tile.h"
#include "../util.h"
#include <math.h>
/**
* @brief Resets serial and MPI timers that were activated during some CPD
* pre-processing.
*
* @param rinfo MPI rank information.
*/
static void p_reset_cpd_timers(
    rank_info const * const rinfo)
{
  timer_reset(&timers[TIMER_ATA]);

#ifdef SPLATT_USE_MPI
  /* sweep over every MPI-related timer instead of one call per timer */
  int const mpi_timer_ids[] = {
    TIMER_MPI,
    TIMER_MPI_IDLE,
    TIMER_MPI_COMM,
    TIMER_MPI_ATA,
    TIMER_MPI_REDUCE,
    TIMER_MPI_NORM,
    TIMER_MPI_UPDATE,
    TIMER_MPI_FIT
  };
  for(size_t t=0; t < sizeof(mpi_timer_ids) / sizeof(mpi_timer_ids[0]); ++t) {
    timer_reset(&timers[mpi_timer_ids[t]]);
  }

  /* synchronize ranks so subsequent timings start together */
  MPI_Barrier(rinfo->comm_3d);
#endif
}
/**
* @brief Compute the inner product of a Kruskal tensor and an unfactored
* tensor. Assumes that 'm1' contains the MTTKRP result along the last
* mode of the two input tensors. This naturally follows the end of a
* CPD iteration.
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
*
* @return The inner product of the two tensors, computed via:
* 1^T hadamard(mats[nmodes-1], m1) \lambda.
*/
static val_t p_tt_kruskal_inner(
    idx_t const nmodes,
    rank_info * const rinfo,
    thd_info * const thds,
    val_t const * const restrict lambda,
    matrix_t ** mats,
    matrix_t const * const m1)
{
  idx_t const rank = mats[0]->J;
  idx_t const lastm = nmodes - 1;   /* the last mode, whose MTTKRP is in m1 */
  idx_t const dim = m1->I;

  val_t const * const m0 = mats[lastm]->vals;
  val_t const * const mv = m1->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread scratch buffer of (at least) 'rank' values */
    val_t * const restrict accumF = (val_t *) thds[tid].scratch[0];
    /* zero the column accumulator */
    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }
    /* accumF[r] += m0[i][r] * mv[i][r], rows split across threads */
    #pragma omp for
    for(idx_t i=0; i < dim; ++i) {
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += m0[r+(i*rank)] * mv[r+(i*rank)];
      }
    }
    /* accumulate everything into 'myinner' */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * lambda[r];
    }
  }
  val_t inner = 0.;
#ifdef SPLATT_USE_MPI
  timer_start(&timers[TIMER_MPI_FIT]);
  /* sum the per-rank partial inner products over the 3D communicator */
  MPI_Allreduce(&myinner, &inner, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
  timer_stop(&timers[TIMER_MPI_FIT]);
#else
  inner = myinner;
#endif
  return inner;
}
/**
* @brief Find the Frobenius norm squared of a Kruskal tensor. This equivalent
* to via computing <X,X>, the inner product of X with itself. We find
* this via \lambda^T (AtA * BtB * ...) \lambda, where * is the Hadamard
* product.
*
* @param nmodes The number of modes in the tensor.
* @param lambda The vector of column norms.
* @param aTa An array of Gram Matrices (AtA, BtB, ...).
*
* @return The Frobenius norm of X, squared.
*/
static val_t p_kruskal_norm(
    idx_t const nmodes,
    val_t const * const restrict lambda,
    matrix_t ** aTa)
{
  idx_t const rank = aTa[0]->J;
  /* aTa[MAX_NMODES] serves as scratch space for the Hadamard product */
  val_t * const restrict scratch = aTa[MAX_NMODES]->vals;

  /* seed the upper triangle of scratch with ones */
  for(idx_t row=0; row < rank; ++row) {
    for(idx_t col=row; col < rank; ++col) {
      scratch[col + (row*rank)] = 1.;
    }
  }

  /* scratch <- AtA * BtB * ... (Hadamard product, upper triangle only) */
  for(idx_t m=0; m < nmodes; ++m) {
    val_t const * const restrict gram = aTa[m]->vals;
    for(idx_t row=0; row < rank; ++row) {
      for(idx_t col=row; col < rank; ++col) {
        scratch[col + (row*rank)] *= gram[col + (row*rank)];
      }
    }
  }

  /* lambda^T scratch lambda, exploiting symmetry: diagonal once,
   * off-diagonal entries doubled */
  val_t norm_mats = 0;
  for(idx_t row=0; row < rank; ++row) {
    norm_mats += scratch[row + (row*rank)] * lambda[row] * lambda[row];
    for(idx_t col=row+1; col < rank; ++col) {
      norm_mats += scratch[col + (row*rank)] * lambda[row] * lambda[col] * 2;
    }
  }

  return fabs(norm_mats);
}
/**
* @brief Compute the fit of a Kruskal tensor, Z, to an input tensor, X. This
*        is computed via 1 - [sqrt(<X,X> + <Z,Z> - 2<X,Z>) / sqrt(<X,X>)].
*
* @param nmodes The number of modes in the input tensors.
* @param rinfo MPI rank information.
* @param thds OpenMP thread data structures.
* @param ttnormsq The norm (squared) of the original input tensor, <X,X>.
* @param lambda The vector of column norms.
* @param mats The Kruskal-tensor matrices.
* @param m1 The result of doing MTTKRP along the last mode.
* @param aTa An array of matrices (length MAX_NMODES) containing BtB, CtC, etc.
*
* @return The fit of Z to X; 1 indicates a perfect reconstruction.
*/
static val_t p_calc_fit(
  idx_t const nmodes,
  rank_info * const rinfo,
  thd_info * const thds,
  val_t const ttnormsq,
  val_t const * const restrict lambda,
  matrix_t ** mats,
  matrix_t const * const m1,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_FIT]);

  /* First get norm of new model: lambda^T * (hada aTa) * lambda. */
  val_t const norm_mats = p_kruskal_norm(nmodes, lambda, aTa);

  /* Compute inner product of tensor with new model */
  val_t const inner = p_tt_kruskal_inner(nmodes, rinfo, thds, lambda, mats,m1);

  /*
  * We want sqrt(<X,X> + <Y,Y> - 2<X,Y>). Rounding error can push the
  * argument slightly negative when the fit is (near) perfect; clamp to 0 in
  * that case instead of keeping the negative value, which would report a
  * fit greater than 1.
  */
  val_t residual = ttnormsq + norm_mats - (2 * inner);
  residual = (residual > 0.) ? sqrt(residual) : 0.;

  timer_stop(&timers[TIMER_FIT]);
  return 1 - (residual / sqrt(ttnormsq));
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Flush the updated values in globalmat to our local representation.
*
* @param indmap Optional local-to-global index mapping (unused here; kept so
*               the signature parallels the other row-exchange helpers).
* @param localmat The local matrix to update.
* @param globalmat The recently updated global matrix.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the factor matrices.
* @param mode The mode we are operating on.
*/
static void p_flush_glob_to_local(
  idx_t const * const indmap,
  matrix_t * const localmat,
  matrix_t const * const globalmat,
  rank_info const * const rinfo,
  idx_t const nfactors,
  idx_t const mode)
{
  idx_t const m = mode;
  idx_t const start = rinfo->ownstart[m];
  idx_t const nowned = rinfo->nowned[m];

  assert(start + nowned <= localmat->I);

  /* copy the rows I own from the front of globalmat back into my local
   * matrix, starting at my first owned local row */
  par_memcpy(localmat->vals + (start*nfactors),
      globalmat->vals,
      nowned * nfactors * sizeof(val_t));
}
/**
* @brief Do a reduction (sum) of all neighbor partial products which I own.
* Updates are written to globalmat.
* This version accomplishes the communication with an MPI_Alltoallv().
*
* @param local2nbr_buf A buffer at least as large as nlocal2nbr.
* @param nbr2globs_buf A buffer at least as large as nnbr2globs.
* @param localmat My local matrix containing partial products for other ranks.
* @param globalmat The global factor matrix to update.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the matrices.
* @param m The mode to operate on.
*/
static void p_reduce_rows_all2all(
val_t * const restrict local2nbr_buf,
val_t * const restrict nbr2globs_buf,
matrix_t const * const localmat,
matrix_t * const globalmat,
rank_info const * const rinfo,
idx_t const nfactors,
idx_t const m)
{
idx_t const mat_start = rinfo->mat_start[m];
idx_t const * const restrict local2nbr_inds = rinfo->local2nbr_inds[m];
idx_t const * const restrict nbr2globs_inds = rinfo->nbr2globs_inds[m];
val_t const * const restrict matv = localmat->vals;
val_t * const restrict gmatv = globalmat->vals;
/* copy my partial products into the sendbuf */
#pragma omp parallel for
for(idx_t s=0; s < rinfo->nlocal2nbr[m]; ++s) {
idx_t const row = local2nbr_inds[s];
for(idx_t f=0; f < nfactors; ++f) {
local2nbr_buf[f + (s*nfactors)] = matv[f + (row*nfactors)];
}
}
/* grab ptr/disp from rinfo. nbr2local and local2nbr will have the same
* structure so we just reuse those */
int const * const restrict nbr2globs_ptr = rinfo->nbr2globs_ptr[m];
int const * const restrict nbr2local_ptr = rinfo->local2nbr_ptr[m];
int const * const restrict nbr2globs_disp = rinfo->nbr2globs_disp[m];
int const * const restrict nbr2local_disp = rinfo->local2nbr_disp[m];
timer_start(&timers[TIMER_MPI_COMM]);
/* exchange rows */
MPI_Alltoallv(local2nbr_buf, nbr2local_ptr, nbr2local_disp, SPLATT_MPI_VAL,
nbr2globs_buf, nbr2globs_ptr, nbr2globs_disp, SPLATT_MPI_VAL,
rinfo->layer_comm[m]);
timer_stop(&timers[TIMER_MPI_COMM]);
int const lrank = rinfo->layer_rank[m];
int const lsize = rinfo->layer_size[m];
/* Now add received partial products. We can parallelize the additions from
* each process. */
#pragma omp parallel
for(int p=1; p < lsize; ++p) {
/* stagger which sender each rank processes first by offsetting with lrank */
int const porig = (p + lrank) % lsize;
/* The number of rows to recv from porig. ptr/disp arrays count individual
* values, so divide by nfactors to get row counts/offsets. */
int const nrecvs = nbr2globs_ptr[porig] / nfactors;
int const disp = nbr2globs_disp[porig] / nfactors;
#pragma omp for
for(int r=disp; r < disp + nrecvs; ++r) {
/* translate the incoming global row index into my layer-local offset */
idx_t const row = nbr2globs_inds[r] - mat_start;
for(idx_t f=0; f < nfactors; ++f) {
gmatv[f+(row*nfactors)] += nbr2globs_buf[f+(r*nfactors)];
}
}
} /* end recvs */
}
/**
* @brief Do a reduction (sum) of all neighbor partial products which I own.
* Updates are written to globalmat.
* This version accomplishes the communication with an MPI_{Irecv,Isend}.
*
* @param local2nbr_buf A buffer at least as large as nlocal2nbr.
* @param nbr2globs_buf A buffer at least as large as nnbr2globs.
* @param localmat My local matrix containing partial products for other ranks.
* @param globalmat The global factor matrix to update.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the matrices.
* @param m The mode to operate on.
*/
static void p_reduce_rows_point2point(
val_t * const restrict local2nbr_buf,
val_t * const restrict nbr2globs_buf,
matrix_t const * const localmat,
matrix_t * const globalmat,
rank_info * const rinfo,
idx_t const nfactors,
idx_t const m)
{
int const lrank = rinfo->layer_rank[m];
int const lsize = rinfo->layer_size[m];
idx_t const mat_start = rinfo->mat_start[m];
idx_t const * const restrict local2nbr_inds = rinfo->local2nbr_inds[m];
idx_t const * const restrict nbr2globs_inds = rinfo->nbr2globs_inds[m];
val_t const * const restrict matv = localmat->vals;
val_t * const restrict gmatv = globalmat->vals;
/* IRECVS: post all receives up front so every incoming message has a
* matching buffer. ptr/disp arrays count values; divide by nfactors for
* row counts/offsets. */
for(int p=1; p < lsize; ++p) {
int const porig = (p + lrank) % lsize;
/* The number of rows to recv from porig */
int const nrecvs = rinfo->nbr2globs_ptr[m][porig] / nfactors;
int const disp = rinfo->nbr2globs_disp[m][porig] / nfactors;
if(nrecvs == 0) {
continue;
}
/* do the actual communication */
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Irecv(&(nbr2globs_buf[disp*nfactors]), nrecvs*nfactors, SPLATT_MPI_VAL,
porig, 0, rinfo->layer_comm[m], rinfo->recv_reqs + porig);
timer_stop(&timers[TIMER_MPI_COMM]);
}
#pragma omp parallel default(shared)
{
/* ISENDS */
for(int p=1; p < lsize; ++p) {
/* destination process -- starting from p+1 helps avoid contention */
int const pdest = (p + lrank) % lsize;
/* The number of rows to send to pdest */
int const nsends = rinfo->local2nbr_ptr[m][pdest] / nfactors;
int const disp = rinfo->local2nbr_disp[m][pdest] / nfactors;
if(nsends == 0) {
continue;
}
/* first prepare all rows that I own and need to send */
#pragma omp for
for(int s=disp; s < disp+nsends; ++s) {
idx_t const row = local2nbr_inds[s];
for(idx_t f=0; f < nfactors; ++f) {
local2nbr_buf[f + (s*nfactors)] = matv[f + (row*nfactors)];
}
}
/* The implicit barrier at the end of the omp for above guarantees the send
* buffer is fully packed before the master thread issues the Isend.
* NOTE(review): send_reqs are never waited on in this function -- presumably
* completed elsewhere before local2nbr_buf is reused; confirm. */
#pragma omp master
{
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Isend(&(local2nbr_buf[disp*nfactors]), nsends*nfactors, SPLATT_MPI_VAL,
pdest, 0, rinfo->layer_comm[m], rinfo->send_reqs + pdest);
timer_stop(&timers[TIMER_MPI_COMM]);
}
} /* end sends */
/* RECVS */
for(int p=1; p < lsize; ++p) {
int const porig = (p + lrank) % lsize;
/* The number of rows to recv from porig */
int const nrecvs = rinfo->nbr2globs_ptr[m][porig] / nfactors;
int const disp = rinfo->nbr2globs_disp[m][porig] / nfactors;
if(nrecvs == 0) {
continue;
}
/* Wait for receive to complete */
#pragma omp master
{
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Wait(rinfo->recv_reqs + porig, MPI_STATUS_IGNORE);
timer_stop(&timers[TIMER_MPI_COMM]);
}
/* wait until recv is done */
#pragma omp barrier
/* now add partial products */
#pragma omp for
for(int r=disp; r < disp + nrecvs; ++r) {
idx_t const row = nbr2globs_inds[r] - mat_start;
for(idx_t f=0; f < nfactors; ++f) {
gmatv[f+(row*nfactors)] += nbr2globs_buf[f+(r*nfactors)];
}
}
} /* end recvs */
} /* end omp parallel */
}
/**
* @brief Exchange updated factor rows with all MPI ranks in the same layer.
* This version accomplishes the communication with individual MPI_Isend
* and MPI_Recv.
* We send globmats[mode] to the needing ranks and receive other ranks'
* globmats entries which we store in mats[mode].
*
* @param nbr2globs_buf Buffer at least as large as there are rows to send
* (for each rank).
* @param nbr2local_buf Buffer at least as large as there are rows to receive.
* @param localmat Local factor matrix which receives updated values.
* @param globalmat Global factor matrix (owned by me) which is sent to ranks.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the factor matrices.
* @param mode The mode to exchange along.
*/
static void p_update_rows_point2point(
val_t * const nbr2globs_buf,
val_t * const nbr2local_buf,
matrix_t * const localmat,
matrix_t * const globalmat,
rank_info * const rinfo,
idx_t const nfactors,
idx_t const mode)
{
idx_t const m = mode;
idx_t const mat_start = rinfo->mat_start[m];
idx_t const * const nbr2globs_inds = rinfo->nbr2globs_inds[m];
idx_t const * const local2nbr_inds = rinfo->local2nbr_inds[m];
val_t const * const gmatv = globalmat->vals;
val_t * const matv = localmat->vals;
int const lrank = rinfo->layer_rank[m];
int const lsize = rinfo->layer_size[m];
/* IRECVS: post all receives first so incoming rows have a matching buffer.
* ptr/disp arrays count values; divide by nfactors for rows. */
for(int p=1; p < lsize; ++p) {
int const porig = (p + lrank) % lsize;
/* The number of rows to recv from porig */
int const nrecvs = rinfo->local2nbr_ptr[m][porig] / nfactors;
int const disp = rinfo->local2nbr_disp[m][porig] / nfactors;
if(nrecvs == 0) {
continue;
}
/* do the actual communication */
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Irecv(&(nbr2local_buf[disp*nfactors]), nrecvs*nfactors, SPLATT_MPI_VAL,
porig, 0, rinfo->layer_comm[m], rinfo->recv_reqs + porig);
timer_stop(&timers[TIMER_MPI_COMM]);
}
#pragma omp parallel default(shared)
{
/* SENDS */
for(int p=1; p < lsize; ++p) {
/* destination process -- starting from p+1 helps avoid contention */
int const pdest = (p + lrank) % lsize;
/* The number of rows to send to pdest */
int const nsends = rinfo->nbr2globs_ptr[m][pdest] / nfactors;
int const disp = rinfo->nbr2globs_disp[m][pdest] / nfactors;
if(nsends == 0) {
continue;
}
/* first prepare all rows that I own and need to send */
#pragma omp for
for(int s=disp; s < disp+nsends; ++s) {
idx_t const row = nbr2globs_inds[s] - mat_start;
for(idx_t f=0; f < nfactors; ++f) {
nbr2globs_buf[f+(s*nfactors)] = gmatv[f+(row*nfactors)];
}
}
/* The omp for's implicit barrier ensures the buffer is packed before the
* master thread sends it.
* NOTE(review): every Isend reuses the single request &(rinfo->req) without
* an intervening MPI_Wait -- an active request may be overwritten when
* lsize > 2. Confirm this is completed/ignored safely elsewhere. */
#pragma omp master
{
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Isend(&(nbr2globs_buf[disp*nfactors]), nsends*nfactors, SPLATT_MPI_VAL,
pdest, 0, rinfo->layer_comm[m], &(rinfo->req));
timer_stop(&timers[TIMER_MPI_COMM]);
}
} /* end sends */
/* RECVS */
for(int p=1; p < lsize; ++p) {
int const porig = (p + lrank) % lsize;
/* The number of rows to recv from porig */
int const nrecvs = rinfo->local2nbr_ptr[m][porig] / nfactors;
int const disp = rinfo->local2nbr_disp[m][porig] / nfactors;
if(nrecvs == 0) {
continue;
}
/* wait for the actual communication */
#pragma omp master
{
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Wait(rinfo->recv_reqs + porig, MPI_STATUS_IGNORE);
timer_stop(&timers[TIMER_MPI_COMM]);
}
/* wait until recv is done */
#pragma omp barrier
/* now write incoming nbr2locals to my local matrix */
#pragma omp for
for(int r=disp; r < disp + nrecvs; ++r) {
idx_t const row = local2nbr_inds[r];
for(idx_t f=0; f < nfactors; ++f) {
matv[f+(row*nfactors)] = nbr2local_buf[f+(r*nfactors)];
}
}
} /* end recvs */
} /* end omp parallel */
}
/**
* @brief Exchange updated factor rows with all MPI ranks in the same layer.
* This version accomplishes the communication with an MPI_Alltoallv().
* We send globmats[mode] to the needing ranks and receive other ranks'
* globmats entries which we store in mats[mode].
*
* @param nbr2globs_buf Buffer at least as large as there are rows to send
* (for each rank).
* @param nbr2local_buf Buffer at least as large as there are rows to receive.
* @param localmat Local factor matrix which receives updated values.
* @param globalmat Global factor matrix (owned by me) which is sent to ranks.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the factor matrices.
* @param mode The mode to exchange along.
*/
static void p_update_rows_all2all(
val_t * const nbr2globs_buf,
val_t * const nbr2local_buf,
matrix_t * const localmat,
matrix_t * const globalmat,
rank_info * const rinfo,
idx_t const nfactors,
idx_t const mode)
{
idx_t const m = mode;
idx_t const mat_start = rinfo->mat_start[m];
idx_t const * const nbr2globs_inds = rinfo->nbr2globs_inds[m];
idx_t const * const local2nbr_inds = rinfo->local2nbr_inds[m];
val_t const * const gmatv = globalmat->vals;
#pragma omp parallel
{
/* first prepare all rows that I own and need to send */
#pragma omp for
for(idx_t s=0; s < rinfo->nnbr2globs[m]; ++s) {
idx_t const row = nbr2globs_inds[s] - mat_start;
for(idx_t f=0; f < nfactors; ++f) {
nbr2globs_buf[f+(s*nfactors)] = gmatv[f+(row*nfactors)];
}
}
/* The omp for's implicit barrier guarantees the send buffer is fully
* packed before the master thread starts the collective exchange. */
#pragma omp master
{
/* grab ptr/disp from rinfo. nbr2local and local2nbr will have the same
* structure so we just reuse those */
int const * const restrict nbr2globs_ptr = rinfo->nbr2globs_ptr[m];
int const * const restrict nbr2local_ptr = rinfo->local2nbr_ptr[m];
int const * const restrict nbr2globs_disp = rinfo->nbr2globs_disp[m];
int const * const restrict nbr2local_disp = rinfo->local2nbr_disp[m];
/* exchange rows */
timer_start(&timers[TIMER_MPI_COMM]);
MPI_Alltoallv(nbr2globs_buf, nbr2globs_ptr, nbr2globs_disp, SPLATT_MPI_VAL,
nbr2local_buf, nbr2local_ptr, nbr2local_disp, SPLATT_MPI_VAL,
rinfo->layer_comm[m]);
timer_stop(&timers[TIMER_MPI_COMM]);
}
/* wait for communication to complete */
#pragma omp barrier
/* now write incoming nbr2locals to my local matrix */
val_t * const matv = localmat->vals;
#pragma omp for
for(idx_t r=0; r < rinfo->nlocal2nbr[m]; ++r) {
idx_t const row = local2nbr_inds[r];
for(idx_t f=0; f < nfactors; ++f) {
matv[f+(row*nfactors)] = nbr2local_buf[f+(r*nfactors)];
}
}
} /* end omp parallel */
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/**
* @brief Run distributed CP-ALS: iterate up to SPLATT_OPTION_NITER times,
*        updating each mode's factor matrix per iteration, until the change
*        in fit drops below SPLATT_OPTION_TOLERANCE.
*
* @param tensors The CSF tensor(s) to factor.
* @param mats Local factor matrices; mats[MAX_NMODES] is MTTKRP scratch.
* @param globmats Globally-owned factor matrices (rows this rank owns).
* @param lambda Output vector of column norms.
* @param nfactors The CP rank.
* @param rinfo MPI rank information.
* @param opts Options array (threads, iterations, tolerance, verbosity, ...).
*
* @return The final fit of the factorization.
*/
double mpi_cpd_als_iterate(
splatt_csf const * const tensors,
matrix_t ** mats,
matrix_t ** globmats,
val_t * const lambda,
idx_t const nfactors,
rank_info * const rinfo,
double const * const opts)
{
idx_t const nmodes = tensors[0].nmodes;
idx_t const nthreads = (idx_t) opts[SPLATT_OPTION_NTHREADS];
/* Setup thread structures. + 64 bytes is to avoid false sharing. */
splatt_omp_set_num_threads(nthreads);
thd_info * thds = thd_init(nthreads, 3,
(nfactors * nfactors * sizeof(val_t)) + 64,
(TILE_SIZES[0] * nfactors * sizeof(val_t)) + 64,
(nmodes * nfactors * sizeof(val_t)) + 64);
/* m1 holds the (possibly merged) MTTKRP result for the current mode */
matrix_t * m1 = mats[MAX_NMODES];
/* Extract MPI communication structures */
idx_t maxdim = 0;
idx_t maxlocal2nbr = 0;
idx_t maxnbr2globs = 0;
for(idx_t m=0; m < nmodes; ++m) {
maxlocal2nbr = SS_MAX(maxlocal2nbr, rinfo->nlocal2nbr[m]);
maxnbr2globs = SS_MAX(maxnbr2globs, rinfo->nnbr2globs[m]);
maxdim = SS_MAX(globmats[m]->I, maxdim);
}
/* buffers are sized in values, not rows */
maxlocal2nbr *= nfactors;
maxnbr2globs *= nfactors;
val_t * local2nbr_buf = (val_t *) splatt_malloc(maxlocal2nbr * sizeof(val_t));
val_t * nbr2globs_buf = (val_t *) splatt_malloc(maxnbr2globs * sizeof(val_t));
if(rinfo->decomp != SPLATT_DECOMP_COARSE) {
m1 = mat_alloc(maxdim, nfactors);
}
/* Exchange initial matrices */
for(idx_t m=1; m < nmodes; ++m) {
mpi_update_rows(rinfo->indmap[m], nbr2globs_buf, local2nbr_buf, mats[m],
globmats[m], rinfo, nfactors, m, opts[SPLATT_OPTION_COMM]);
}
matrix_t * m1ptr = m1; /* for restoring m1 */
/* Initialize first A^T * A mats. We redundantly do the first because it
* makes communication easier. */
matrix_t * aTa[MAX_NMODES+1];
for(idx_t m=0; m < nmodes; ++m) {
aTa[m] = mat_alloc(nfactors, nfactors);
mat_aTa_mpi(globmats[m], aTa[m], rinfo->comm_3d);
}
/* used as buffer space */
aTa[MAX_NMODES] = mat_alloc(nfactors, nfactors);
/* mttkrp workspace */
splatt_mttkrp_ws * mttkrp_ws = splatt_mttkrp_alloc_ws(tensors,nfactors,opts);
/* Compute input tensor norm */
double oldfit = 0;
double fit = 0;
val_t mynorm = csf_frobsq(tensors);
val_t ttnormsq = 0;
MPI_Allreduce(&mynorm, &ttnormsq, 1, SPLATT_MPI_VAL, MPI_SUM, rinfo->comm_3d);
/* setup timers */
p_reset_cpd_timers(rinfo);
sp_timer_t itertime;
sp_timer_t modetime[MAX_NMODES];
timer_start(&timers[TIMER_CPD]);
idx_t const niters = (idx_t) opts[SPLATT_OPTION_NITER];
/* main ALS loop: one factor-matrix update per mode, per iteration */
for(idx_t it=0; it < niters; ++it) {
timer_fstart(&itertime);
for(idx_t m=0; m < nmodes; ++m) {
timer_fstart(&modetime[m]);
mats[MAX_NMODES]->I = tensors[0].dims[m];
/* M1 = X * (C o B) */
timer_start(&timers[TIMER_MTTKRP]);
mttkrp_csf(tensors, mats, m, thds, mttkrp_ws, opts);
timer_stop(&timers[TIMER_MTTKRP]);
m1->I = globmats[m]->I;
m1ptr->I = globmats[m]->I;
/* only merge with neighbors when the layer actually has >1 rank */
if(rinfo->decomp != SPLATT_DECOMP_COARSE && rinfo->layer_size[m] > 1) {
m1 = m1ptr;
/* add my partial multiplications to globmats[m] */
mpi_add_my_partials(rinfo->indmap[m], mats[MAX_NMODES], m1, rinfo,
nfactors, m);
/* incorporate neighbors' partials */
mpi_reduce_rows(local2nbr_buf, nbr2globs_buf, mats[MAX_NMODES], m1,
rinfo, nfactors, m, opts[SPLATT_OPTION_COMM]);
} else {
/* skip the whole process */
m1 = mats[MAX_NMODES];
}
/* XXX removed for merge */
#if 0
/* invert normal equations (Cholesky factorization) for new factor */
par_memcpy(globmats[m]->vals, m1->vals, m1->I * nfactors * sizeof(val_t));
mat_solve_normals(m, nmodes, aTa, globmats[m],
opts[SPLATT_OPTION_REGULARIZE]);
#endif
/* normalize columns and extract lambda */
mat_normalize_mpi(globmats[m], lambda, rinfo->comm_3d);
/* send updated rows to neighbors */
mpi_update_rows(rinfo->indmap[m], nbr2globs_buf, local2nbr_buf, mats[m],
globmats[m], rinfo, nfactors, m, opts[SPLATT_OPTION_COMM]);
/* update A^T*A */
mat_aTa_mpi(globmats[m], aTa[m], rinfo->comm_3d);
timer_stop(&modetime[m]);
} /* foreach mode */
fit = p_calc_fit(nmodes, rinfo, thds, ttnormsq, lambda, globmats, m1, aTa);
timer_stop(&itertime);
if(rinfo->rank == 0 &&
opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
printf(" its = %3"SPLATT_PF_IDX" (%0.3fs) fit = %0.5f delta = %+0.4e\n",
it+1, itertime.seconds, fit, fit - oldfit);
if(opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_LOW) {
for(idx_t m=0; m < nmodes; ++m) {
printf(" mode = %1"SPLATT_PF_IDX" (%0.3fs)\n", m+1,
modetime[m].seconds);
}
}
}
/* convergence: stop when the fit stops improving meaningfully */
if(it > 0 && fabs(fit - oldfit) < opts[SPLATT_OPTION_TOLERANCE]) {
break;
}
oldfit = fit;
}
timer_stop(&timers[TIMER_CPD]);
if(rinfo->rank == 0 &&
opts[SPLATT_OPTION_VERBOSITY] > SPLATT_VERBOSITY_NONE) {
printf("Final fit: %0.5f\n", fit);
}
/* POST PROCESSING */
/* normalize each mat and adjust lambda */
val_t * tmp = (val_t *) splatt_malloc(nfactors * sizeof(val_t));
for(idx_t m=0; m < nmodes; ++m) {
mat_normalize_mpi(globmats[m], tmp, rinfo->comm_3d);
for(idx_t f=0; f < nfactors; ++f) {
lambda[f] *= tmp[f];
}
}
free(tmp);
/* CLEAN UP */
splatt_mttkrp_free_ws(mttkrp_ws);
for(idx_t m=0; m < nmodes; ++m) {
mat_free(aTa[m]);
}
mat_free(aTa[MAX_NMODES]);
thd_free(thds, nthreads);
if(rinfo->decomp != SPLATT_DECOMP_COARSE) {
mat_free(m1ptr);
}
free(local2nbr_buf);
free(nbr2globs_buf);
mpi_time_stats(rinfo);
return fit;
}
/*
 * Exchange updated factor rows along `mode` using the requested
 * communication pattern, then refresh the local matrix from the global one.
 */
void mpi_update_rows(
  idx_t const * const indmap,
  val_t * const nbr2globs_buf,
  val_t * const nbr2local_buf,
  matrix_t * const localmat,
  matrix_t * const globalmat,
  rank_info * const rinfo,
  idx_t const nfactors,
  idx_t const mode,
  splatt_comm_type const which)
{
  timer_start(&timers[TIMER_MPI_UPDATE]);

  /* dispatch on the requested communication strategy */
  if(which == SPLATT_COMM_POINT2POINT) {
    p_update_rows_point2point(nbr2globs_buf, nbr2local_buf, localmat,
        globalmat, rinfo, nfactors, mode);
  } else if(which == SPLATT_COMM_ALL2ALL) {
    p_update_rows_all2all(nbr2globs_buf, nbr2local_buf, localmat,
        globalmat, rinfo, nfactors, mode);
  }

  /* ensure the local matrix is up to date too */
  p_flush_glob_to_local(indmap, localmat, globalmat, rinfo, nfactors, mode);

  timer_stop(&timers[TIMER_MPI_UPDATE]);
}
/*
 * Sum neighbors' partial products into the rows of globalmat that this rank
 * owns, using the requested communication pattern.
 */
void mpi_reduce_rows(
  val_t * const restrict local2nbr_buf,
  val_t * const restrict nbr2globs_buf,
  matrix_t const * const localmat,
  matrix_t * const globalmat,
  rank_info * const rinfo,
  idx_t const nfactors,
  idx_t const mode,
  splatt_comm_type const which)
{
  timer_start(&timers[TIMER_MPI_REDUCE]);

  /* dispatch on the requested communication strategy */
  if(which == SPLATT_COMM_POINT2POINT) {
    p_reduce_rows_point2point(local2nbr_buf, nbr2globs_buf, localmat,
        globalmat, rinfo, nfactors, mode);
  } else if(which == SPLATT_COMM_ALL2ALL) {
    p_reduce_rows_all2all(local2nbr_buf, nbr2globs_buf, localmat,
        globalmat, rinfo, nfactors, mode);
  }

  timer_stop(&timers[TIMER_MPI_REDUCE]);
}
/**
* @brief Seed the global factor matrix with this rank's own partial products:
*        zero globmat, then copy the locally-owned rows of localmat into it.
*
* @param indmap Optional local-to-global index mapping (NULL when local
*               indices are already global).
* @param localmat The local (MTTKRP output) matrix.
* @param globmat The global factor matrix to fill.
* @param rinfo MPI rank information.
* @param nfactors The number of columns in the matrices.
* @param mode The mode we are operating on.
*/
void mpi_add_my_partials(
  idx_t const * const indmap,
  matrix_t const * const localmat,
  matrix_t * const globmat,
  rank_info const * const rinfo,
  idx_t const nfactors,
  idx_t const mode)
{
  timer_start(&timers[TIMER_MPI_PARTIALS]);
  idx_t const m = mode;
  idx_t const mat_start = rinfo->mat_start[m];
  idx_t const start = rinfo->ownstart[m];
  idx_t const nowned = rinfo->nowned[m];

  /* zero everything first so unowned rows start from 0 */
  memset(globmat->vals, 0, globmat->I * nfactors * sizeof(val_t));

  /* translate the first owned local row into an offset within globmat */
  idx_t const goffset = (indmap == NULL) ?
      start - mat_start : indmap[start] - mat_start;

  par_memcpy(globmat->vals + (goffset * nfactors),
      localmat->vals + (start * nfactors),
      nowned * nfactors * sizeof(val_t));
  timer_stop(&timers[TIMER_MPI_PARTIALS]);
}
/*
 * Aggregate per-rank timers across MPI: TIMER_MPI becomes the sum of the
 * MPI sub-timers; then the MTTKRP/MPI/idle/comm timers are replaced by the
 * all-rank average and their *_MAX counterparts by the all-rank maximum
 * (meaningful on rank 0, the reduction root).
 */
void mpi_time_stats(
  rank_info const * const rinfo)
{
  /* roll the MPI sub-timers up into TIMER_MPI */
  timers[TIMER_MPI].seconds =
      timers[TIMER_MPI_ATA].seconds
      + timers[TIMER_MPI_REDUCE].seconds
      + timers[TIMER_MPI_PARTIALS].seconds
      + timers[TIMER_MPI_NORM].seconds
      + timers[TIMER_MPI_FIT].seconds
      + timers[TIMER_MPI_UPDATE].seconds;

  int const src[4]  = {TIMER_MTTKRP, TIMER_MPI, TIMER_MPI_IDLE,
                       TIMER_MPI_COMM};
  int const dest[4] = {TIMER_MTTKRP_MAX, TIMER_MPI_MAX, TIMER_MPI_IDLE_MAX,
                       TIMER_MPI_COMM_MAX};
  double sums[4];
  double maxs[4];

  /* get avg times (sum first, divide by npes below) */
  for(int t=0; t < 4; ++t) {
    MPI_Reduce(&timers[src[t]].seconds, &sums[t], 1, MPI_DOUBLE, MPI_SUM, 0,
        rinfo->comm_3d);
  }
  /* get max times */
  for(int t=0; t < 4; ++t) {
    MPI_Reduce(&timers[src[t]].seconds, &maxs[t], 1, MPI_DOUBLE, MPI_MAX, 0,
        rinfo->comm_3d);
  }

  /* overwrite local timers with the aggregated statistics */
  for(int t=0; t < 4; ++t) {
    timers[src[t]].seconds = sums[t] / rinfo->npes;
    timers[dest[t]].seconds = maxs[t];
  }
}
|
flexProxDualFrobenius.h | #ifndef flexProxDualFrobenius_H
#define flexProxDualFrobenius_H
#include "flexProx.h"
//! represents prox for a Frobenius term
/*!
\f$ \alpha\|\cdot\|_{F} \f$
*/
template<typename T>
class flexProxDualFrobenius : public flexProx<T>
{
#ifdef __CUDACC__
    typedef thrust::device_vector<T> Tdata;
#else
    typedef std::vector<T> Tdata;
#endif

public:
    flexProxDualFrobenius() : flexProx<T>(dualFrobeniusProx)
    {
    }

    ~flexProxDualFrobenius()
    {
        // fixed: newline belongs at the end of the message
        if (VERBOSE > 0) printf("Destructor prox!\n");
    }

#ifdef __CUDACC__
    //! squares a single element; used to accumulate the squared Frobenius norm
    struct flexFrobeniusSquareFunctor
    {
        __host__ __device__
        flexFrobeniusSquareFunctor(){};

        __host__ __device__ T
        operator()(const T& x) const
        {
            return x * x;
        }
    };

    //! writes y = norm * yTilde for one zipped (y, yTilde) element pair
    struct flexProxDualFrobeniusFunctor
    {
        __host__ __device__
        flexProxDualFrobeniusFunctor(T _norm) : norm(_norm){};

        template <typename Tuple>
        __host__ __device__
        void operator()(Tuple t)
        {
            thrust::get<0>(t) = this->norm * thrust::get<1>(t);
        }

        const T norm;
    };
#endif

    /*!
     * Applies the prox: y = yTilde / max(1, ||yTilde||_F / alpha), where the
     * Frobenius norm is taken jointly over all dual variables in dualNumbers.
     */
    void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers)
    {
#ifdef __CUDACC__
        flexFrobeniusSquareFunctor unary_op;
        thrust::plus<T> binary_op;

        T norm = (T)0;
        // size_t index avoids a signed/unsigned comparison with size()
        for (size_t k = 0; k < dualNumbers.size(); k++)
        {
            // add sum of squared elements to norm
            norm += thrust::transform_reduce(data->yTilde[dualNumbers[k]].begin(), data->yTilde[dualNumbers[k]].end(), unary_op, (T)0, binary_op);
        }

        norm = (T)1 / std::max((T)1, std::sqrt(norm) / alpha);

        for (size_t k = 0; k < dualNumbers.size(); k++)
        {
            auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[k]].begin(), data->yTilde[dualNumbers[k]].begin()));
            auto endIterator = thrust::make_zip_iterator( thrust::make_tuple(data->y[dualNumbers[k]].end(), data->yTilde[dualNumbers[k]].end()));

            thrust::for_each(startIterator, endIterator, flexProxDualFrobeniusFunctor(norm));
        }
#else
        T norm = (T)0;
        for (size_t k = 0; k < dualNumbers.size(); k++)
        {
            T* ptrYTilde = data->yTilde[dualNumbers[k]].data();
            int numElements = (int)data->yTilde[dualNumbers[k]].size();

            #pragma omp parallel for reduction(+: norm)
            for (int i = 0; i < numElements; i++)
            {
                norm += ptrYTilde[i] * ptrYTilde[i];
            }
        }

        norm = (T)1 / std::max((T)1, std::sqrt(norm) / alpha);

        for (size_t k = 0; k < dualNumbers.size(); k++)
        {
            T* ptrY = data->y[dualNumbers[k]].data();
            T* ptrYTilde = data->yTilde[dualNumbers[k]].data();
            int numElements = (int)data->yTilde[dualNumbers[k]].size();

            #pragma omp parallel for
            for (int i = 0; i < numElements; i++)
            {
                ptrY[i] = ptrYTilde[i] * norm;
            }
        }
#endif
    }

    //! fList variant is intentionally a no-op for this prox
    void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList)
    {
    }
};
#endif
|
target_update.c | // --------------------------------------------------
// Check 'to'
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=to
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
// --------------------------------------------------
// Check 'from'
// --------------------------------------------------
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from
// RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \
// RUN: | %fcheck-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from
// RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from
// RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \
// RUN: | %fcheck-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-x86_64-pc-linux-gnu \
// RUN: -fopenmp-version=51 -DCLAUSE=from
// RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \
// RUN: | %fcheck-x86_64-pc-linux-gnu
#include <stdio.h>
int main() {
int i;
// CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]]
fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i);
// CHECK-NOT: Libomptarget
// While 'i' is mapped (between enter/exit data), the 'present' motion
// modifier is satisfied and the update must succeed without diagnostics.
#pragma omp target enter data map(alloc: i)
#pragma omp target update CLAUSE(present: i)
#pragma omp target exit data map(delete: i)
// CHECK: i is present
fprintf(stderr, "i is present\n");
// CHECK: Libomptarget message: device mapping required by 'present' motion modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes)
// CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
// 'i' is no longer mapped, so this update must abort with the error above;
// the following message must therefore never be printed.
#pragma omp target update CLAUSE(present: i)
// CHECK-NOT: i is present
fprintf(stderr, "i is present\n");
return 0;
}
|
9625.c |
/*
* Compile using the command:
* `cc 27Stencil.c -o oa -fopenmp -lm`
*/
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15
extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */
unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */
/*
* Function prototypes for common functions.
*/
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);
/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();
/* Print command-line usage for the benchmark driver. */
void usage(char *argv[]) {
  printf("Usage: %s \n", argv[0]);
  printf("\t--reps <repetitions> (default %d)\n", DEFAULT_REPS);
  printf("\t--datasize <datasize> (default %d bytes)\n", DEFAULT_DATASIZE);
}
/*
* This function parses the parameters from the command line.
*/
/*
 * Parse command-line parameters, setting the globals `reps` and `datasize`.
 * Exits with a usage message on any invalid or missing argument.
 */
void parse_args(int argc, char *argv[]) {
  for (int arg = 1; arg < argc; arg++) {
    if (strcmp(argv[arg], "--reps") == 0) {
      /* guard: a trailing "--reps" would otherwise read argv[argc] (NULL) */
      if (arg + 1 >= argc) {
        printf("Missing value for --reps\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      reps = atoi(argv[++arg]);
      if (reps == 0) {
        printf("Invalid integer:--reps: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "--datasize") == 0) {
      /* same guard for a trailing "--datasize" */
      if (arg + 1 >= argc) {
        printf("Missing value for --datasize\n");
        usage(argv);
        exit(EXIT_FAILURE);
      }
      datasize = atoi(argv[++arg]);
      if (datasize == 0) {
        printf("Invalid integer:--datasize: %s\n", argv[arg]);
        usage(argv);
        exit(EXIT_FAILURE);
      }
    } else if (strcmp(argv[arg], "-h") == 0) {
      usage(argv);
      exit(EXIT_SUCCESS);
    } else {
      printf("Invalid parameters: %s\n", argv[arg]);
      usage(argv);
      exit(EXIT_FAILURE);
    }
  }
}
/*
 * Compute the mean and standard deviation of the valid entries of the
 * global `times` array. Entries equal to 0 mark failed repetitions and
 * are skipped.
 *
 * mtp: receives the mean time.
 * sdp: receives the standard deviation.
 *
 * If every repetition failed, both outputs are set to 0 (the original code
 * divided by zero here and produced NaN).
 */
void stats(double *mtp, double *sdp) {
  double meantime = 0.;
  double sd = 0.;
  double totaltime = 0.;
  int good_reps = 0;

  for (int i = 0; i < reps; i++) {
    /* times[i] == 0 indicates an error occurred; skip it */
    if (times[i] != 0) {
      totaltime += times[i];
      good_reps++;
    }
  }

  if (good_reps > 0) {
    meantime = totaltime / good_reps;
    double sumsq = 0.;
    for (int i = 0; i < reps; i++) {
      if (times[i] != 0) {
        sumsq += (times[i] - meantime) * (times[i] - meantime);
      }
    }
    sd = sqrt(sumsq / good_reps);
  }

  *mtp = meantime;
  *sdp = sd;
}
/*
* This function prints the results of the tests.
* If you use a compiler which sets a different preprocessor flag
* you may wish to add it here.
*/
/*
 * Emit the benchmark result. Only the mean time (in microseconds) is
 * printed; the compiler identifier and confidence interval feed the
 * disabled verbose line kept below for reference.
 */
void print_results(char *name, double testtime, double testsd) {
  char compiler[20];

  /* Default identifier, overridden for known compiler preprocessor flags. */
  sprintf(compiler, "COMPILER");
#ifdef __PGI
  sprintf(compiler, "PGI");
#endif
#ifdef __HMPP
  sprintf(compiler, "CAPS");
#endif

  /* verbose form:
   * printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6); */
  printf("%f\n", testtime * 1e6);
}
/*
* This function initialises the storage for the test results and set the defaults.
*/
/*
 * Initialise the benchmark: parse arguments, apply defaults, and allocate
 * the per-repetition results array.
 */
void init(int argc, char **argv)
{
  parse_args(argc, argv);

  if (reps == -1) {
    reps = DEFAULT_REPS;
  }
  if (datasize == (unsigned int)-1) {
    datasize = DEFAULT_DATASIZE;
  }

  times = malloc(reps * sizeof(double));
  /* fail fast: the benchmark loop indexes times[] unconditionally */
  if (times == NULL) {
    printf("Failed to allocate memory for %d timing results\n", reps);
    exit(EXIT_FAILURE);
  }

  /*
  #ifdef __PGI
  acc_init(acc_device_nvidia);
  // printf("PGI INIT\n");
  #endif
  #ifdef __HMPP
  int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
  {}
  #endif
  #ifdef _CRAYC
  int a[5] = {1,2,3,4,5};
  #pragma acc data copyin(a[0:5])
  {}
  #endif
  */
}
/* Release the results storage allocated by init(). */
void finalise(void) {
  free(times);
  times = NULL; /* avoid a dangling pointer if anything runs after cleanup */
}
/*
* This function runs the benchmark specified.
*/
void benchmark(char *name, double (*test)(void))
{
int i = 0;
double tmp = 0;
for (i=0; i<reps; i++) {
tmp = test();
if (tmp == -10000){
printf("Memory allocation failure in %s\n", name);
times[i] = 0;
}
else if (tmp == -11000){
printf("CPU/GPU mismatch in %s\n", name);
times[i] = 0;
}
else{
times[i] = tmp;
}
}
stats(&testtime, &testsd);
//printf("in benchmark\n");
print_results(name, testtime, testsd);
//printf("printed result\n");
}
/*
 * 27-point stencil benchmark.
 * Runs ITERATIONS sweeps of a 27-point weighted average over a sz^3 grid
 * (1-cell halo) on the host, repeats the computation inside an OpenACC
 * data region, and returns the wall-clock time of the accelerated run.
 * Returns -10000 on memory-allocation failure and -11000 when the device
 * result differs from the host result by more than TOLERANCE.
 */
double stencil()
{
  extern unsigned int datasize;
  /* Grid edge chosen so the two sz^3 double buffers fit in datasize bytes. */
  int sz = cbrt((datasize/sizeof(double))/2);
  int i, j, k, iter;
  int n = sz-2;  /* interior extent; indices 0 and sz-1 are the halo */
  double fac = FAC;
  double t1, t2;
  double md;
  //printf("size = %d\n", sz);
  /* Work buffers, with halos */
  double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
  double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);
  if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
    /* Something went wrong in the memory allocation here, fail gracefully */
    /* NOTE(review): buffers that *did* allocate are leaked on this path —
     * consider freeing the non-NULL ones before returning. */
    return(-10000);
  }
  /* initialize input array a0 */
  /* zero all of array (including halos) */
  //printf("size = %d\n", sz);
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = 0.0;
        //printf("%d\t", (i*sz*sz+j*sz+k));
      }
    }
  }
  //printf("\n");
  //int size_of_a0 = sizeof(a0) / sizeof(*a0);
  //printf("size of a0 = %d\n", size_of_a0);
  /* use random numbers to fill interior */
  for (i = 1; i < n+1; i++) {
    for (j = 1; j < n+1; j++) {
      for (k = 1; k < n+1; k++) {
        a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
      }
    }
  }
  /* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
  /* save initial input array for later GPU run */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
      }
    }
  }
  //printf("Host computation\n");
  /* run main computation on host: each interior point becomes the sum of
   * its 26 neighbours plus the two k-axis neighbours, scaled by fac */
  for (iter = 0; iter < ITERATIONS; iter++) {
    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          a1[i*sz*sz+j*sz+k] = (
            a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
            a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
            a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
            a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
            a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
            a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
            a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
            a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
          ) * fac;
        }
      }
    }
    /* copy a1 back into a0 for the next sweep (halo stays zero) */
    for (i = 1; i < n+1; i++) {
      for (j = 1; j < n+1; j++) {
        for (k = 1; k < n+1; k++) {
          a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
        }
      }
    }
  } /* end iteration loop */
  /* save result */
  /* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
        // printf("%lf\t", a0[i*sz*sz+j*sz+k]);
      }
    }
  }
  //int size = sizeof(host_result)/sizeof(host_result[0]);
  //for(i = 0; i < size; i++) {
  // printf("%lf\t", host_result[i]);
  //}
  //printf("\n");
  /* copy initial array back to a0 */
  /* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
  for (i = 0; i < sz; i++) {
    for (j = 0; j < sz; j++) {
      for (k = 0; k < sz; k++) {
        a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
      }
    }
  }
  //printf("Starting acc pragma code\n");
  /* Timed accelerated run.  a0 is copied in/out; a1 is device-only. */
  t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
  {
    for (iter = 0; iter < ITERATIONS; iter++) {
      /* NOTE(review): this first loop nest uses OpenMP (host) pragmas while
       * a1 is `create`d on the device and the second nest below is an
       * `acc parallel loop`.  If an accelerator is present, the device copy
       * of a1 read by the second nest is never written by this host loop —
       * these pragmas look like they were meant to be `#pragma acc parallel
       * loop` / `#pragma acc loop` to match.  Also, nesting `omp parallel
       * for` on each loop level requests nested parallelism, which is
       * likely unintended.  Confirm against the reference benchmark. */
#pragma omp parallel for
      for (i = 1; i < n+1; i++) {
#pragma omp parallel for
        for (j = 1; j < n+1; j++) {
#pragma omp parallel for
          for (k = 1; k < n+1; k++) {
            a1[i*sz*sz+j*sz+k] = (
              a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
              a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
              a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
              a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
              a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
              a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
              a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
              a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
            ) * fac;
          }
        }
      }
#pragma acc parallel loop
      for (i = 1; i < n+1; i++) {
#pragma acc loop
        for (j = 1; j < n+1; j++) {
#pragma acc loop
          for (k = 1; k < n+1; k++) {
            a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
          }
        }
      }
    } /* end iteration loop */
  } /* end data region */
#pragma acc wait
  t2 = omp_get_wtime();
  /* Compare device result against the saved host result. */
  memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
  md = max_diff(&host_result[0],&device_result[0], sz);
  /* Free malloc'd memory to prevent leaks */
  free(a0);
  free(a0_init);
  free(a1);
  free(host_result);
  free(device_result);
  //printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
  if (md < TOLERANCE ){
    //printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
    return(t2 - t1);
  }
  else{
    // printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
    return(-11000);
  }
}
/* Utility Functions */
/*
 * Return the largest absolute element-wise difference between two
 * sz*sz*sz arrays, considering interior points only (the one-cell halo
 * at indices 0 and sz-1 in every dimension is ignored).
 */
double max_diff(double *array1, double *array2, int sz)
{
  int n = sz - 2;
  double worst = 0.0;
  for (int i = 1; i <= n; i++) {
    for (int j = 1; j <= n; j++) {
      for (int k = 1; k <= n; k++) {
        int idx = i * sz * sz + j * sz + k;
        double delta = fabs(array1[idx] - array2[idx]);
        if (delta > worst) {
          worst = delta;
        }
      }
    }
  }
  return worst;
}
/*
* This function ensures the device is awake.
* It is more portable than acc_init().
*/
/*
 * This function ensures the device is awake by performing a trivial
 * data transfer and kernel launch.
 * It is more portable than acc_init().
 * On allocation failure it reports the problem and returns instead of
 * dereferencing a NULL buffer (the original fell through and crashed).
 */
void wul(void) {
  int data = 8192;
  int i;
  double *arr_a = (double *)malloc(sizeof(double) * data);
  double *arr_b = (double *)malloc(sizeof(double) * data);
  if (arr_a == NULL || arr_b == NULL) {
    printf("Unable to allocate memory in wul.\n");
    free(arr_a); /* free(NULL) is a no-op, so freeing both is safe */
    free(arr_b);
    return;
  }
  for (i = 0; i < data; i++) {
    arr_a[i] = (double)(rand() / (1.0 + RAND_MAX));
  }
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
  {
#pragma acc parallel loop
    for (i = 0; i < data; i++) {
      arr_b[i] = arr_a[i] * 2;
    }
  }
  if (arr_a[0] < 0) {
    printf("Error in WUL\n");
    /*
     * This should never be called as rands should be in the range (0,1].
     * This stops clever optimizers.
     */
  }
  free(arr_a);
  free(arr_b);
}
/* Entry point: set up storage, wake the device, run the 27-point
 * stencil benchmark, print its timing, and clean up. */
int main(int argc, char **argv) {
  char testName[32];
  //printf("compiler name datasize testtime*1e6 CONF95*testsd*1e6\n");
  /* Initialise storage for test results & parse input arguments. */
  init(argc, argv);
  /* Ensure device is awake. */
  wul();
  snprintf(testName, sizeof testName, "27S");
  benchmark(testName, &stencil);
  /* Print results & free results storage */
  finalise();
  return EXIT_SUCCESS;
}
|
ParticleContainer.h | /**
* @file ParticleContainer.h
*
* @date 17 Jan 2018
* @author tchipevn
*/
#pragma once
#include <algorithm>
#include <array>
#include <set>
#include <vector>

#include "autopas/containers/ParticleContainerInterface.h"
#include "autopas/containers/TraversalInterface.h"

#ifdef AUTOPAS_OPENMP
#include <omp.h>
#endif
namespace autopas {
// consider multiple inheritance or delegation to avoid virtual call to Functor
/**
* The ParticleContainer class stores particles in some object and provides
* methods to iterate over its particles.
* @tparam Particle Class for particles
* @tparam ParticleCell Class for the particle cells
*/
template <class Particle, class ParticleCell, class SoAArraysType = typename Particle::SoAArraysType>
class ParticleContainer : public ParticleContainerInterface<Particle, ParticleCell> {
 public:
  /**
   * Type of the Particle.
   */
  typedef Particle ParticleType;

  /**
   * Type of the ParticleCell.
   */
  typedef ParticleCell ParticleCellType;

  /**
   * Constructor of ParticleContainer
   * @param boxMin Lower corner of the container's domain.
   * @param boxMax Upper corner of the container's domain.
   * @param cutoff Interaction cutoff radius.
   * @param skin Verlet skin (added to the cutoff for the interaction length).
   */
  ParticleContainer(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff,
                    const double skin)
      : _cells(), _boxMin(boxMin), _boxMax(boxMax), _cutoff(cutoff), _skin(skin) {}

  /**
   * Destructor of ParticleContainer.
   */
  ~ParticleContainer() override = default;

  /**
   * Delete the copy constructor to prevent unwanted copies.
   * No particle container should ever be copied.
   * @param obj
   */
  ParticleContainer(const ParticleContainer &obj) = delete;

  /**
   * Delete the copy assignment operator to prevent unwanted copies.
   * No particle container should ever be copied.
   * @param other
   * @return
   */
  ParticleContainer &operator=(const ParticleContainer &other) = delete;

  /**
   * @copydoc autopas::ParticleContainerInterface::getBoxMax()
   */
  const std::array<double, 3> &getBoxMax() const override final { return _boxMax; }

  /**
   * @copydoc autopas::ParticleContainerInterface::setBoxMax()
   */
  void setBoxMax(const std::array<double, 3> &boxMax) override final { _boxMax = boxMax; }

  /**
   * @copydoc autopas::ParticleContainerInterface::getBoxMin()
   */
  const std::array<double, 3> &getBoxMin() const override final { return _boxMin; }

  /**
   * @copydoc autopas::ParticleContainerInterface::setBoxMin()
   */
  void setBoxMin(const std::array<double, 3> &boxMin) override final { _boxMin = boxMin; }

  /**
   * @copydoc autopas::ParticleContainerInterface::getCutoff()
   */
  double getCutoff() const override final { return _cutoff; }

  /**
   * @copydoc autopas::ParticleContainerInterface::setCutoff()
   */
  void setCutoff(double cutoff) override final { _cutoff = cutoff; }

  /**
   * @copydoc autopas::ParticleContainerInterface::getSkin()
   */
  double getSkin() const override final { return _skin; }

  /**
   * @copydoc autopas::ParticleContainerInterface::setSkin()
   */
  void setSkin(double skin) override final { _skin = skin; }

  /**
   * @copydoc autopas::ParticleContainerInterface::getInteractionLength()
   */
  double getInteractionLength() const override final { return _cutoff + _skin; }

  /**
   * Checks if the given traversals are applicable to this container.
   * Takes the set by const reference to avoid copying it on every call.
   * @param traversalOptions
   * @return True iff traversalOptions is a subset of _applicableTraversals
   */
  bool checkIfTraversalsAreApplicable(const std::set<TraversalOption> &traversalOptions) {
    auto applicableTraversals = compatibleTraversals::allCompatibleTraversals(this->getContainerType());
    // std::includes requires sorted ranges, which std::set guarantees.
    return std::includes(applicableTraversals.begin(), applicableTraversals.end(), traversalOptions.begin(),
                         traversalOptions.end());
  }

  /**
   * Deletes all particles from the container.
   */
  void deleteAllParticles() override {
#ifdef AUTOPAS_OPENMP
    // @todo: find a sensible value for magic number
    // numThreads should be at least 1 and maximal max_threads
    int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
    AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads)
#endif
    for (size_t i = 0; i < this->_cells.size(); ++i) {
      this->_cells[i].clear();
    }
  }

  /**
   * Get the number of particles saved in the container.
   * @return Number of particles in the container.
   */
  unsigned long getNumParticles() override {
    size_t numParticles = 0ul;
#ifdef AUTOPAS_OPENMP
    // @todo: find a sensible value for magic number
    // numThreads should be at least 1 and maximal max_threads
    int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
    AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads) reduction(+ : numParticles)
#endif
    // Use this->_cells consistently with deleteAllParticles().
    for (size_t index = 0; index < this->_cells.size(); ++index) {
      numParticles += this->_cells[index].numParticles();
    }
    return numParticles;
  }

 protected:
  /**
   * Vector of particle cells.
   * All particle containers store their particles in ParticleCells. This is the
   * common vector for this purpose.
   */
  std::vector<ParticleCell> _cells;

 private:
  std::array<double, 3> _boxMin;
  std::array<double, 3> _boxMax;
  double _cutoff;
  double _skin;
};
} // namespace autopas
|
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 32
#include "nimbase.h"
#include <string.h>
typedef struct Tcgen529027 Tcgen529027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj178006 Ropeobj178006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47705 Cell47705;
typedef struct Cellseq47721 Cellseq47721;
typedef struct Gcheap50218 Gcheap50218;
typedef struct Gcstack50216 Gcstack50216;
typedef struct Memregion29885 Memregion29885;
typedef struct Smallchunk29839 Smallchunk29839;
typedef struct Llchunk29879 Llchunk29879;
typedef struct Bigchunk29841 Bigchunk29841;
typedef struct Intset29814 Intset29814;
typedef struct Trunk29810 Trunk29810;
typedef struct Avlnode29883 Avlnode29883;
typedef struct Gcstat50214 Gcstat50214;
typedef struct Cellset47717 Cellset47717;
typedef struct Pagedesc47713 Pagedesc47713;
typedef struct Ttypeseq292836 Ttypeseq292836;
typedef struct Ttype292840 Ttype292840;
typedef struct Intset268030 Intset268030;
typedef struct Trunk268026 Trunk268026;
typedef struct Trunkseq268028 Trunkseq268028;
typedef struct Tpasscontext341002 Tpasscontext341002;
typedef struct Tsym292834 Tsym292834;
typedef struct Tidobj199004 Tidobj199004;
typedef struct TNimObject TNimObject;
typedef struct TY292929 TY292929;
typedef struct Tstrtable292806 Tstrtable292806;
typedef struct Tsymseq292804 Tsymseq292804;
typedef struct Tident199010 Tident199010;
typedef struct Tlineinfo191336 Tlineinfo191336;
typedef struct Tnode292802 Tnode292802;
typedef struct Tloc292816 Tloc292816;
typedef struct Tlib292820 Tlib292820;
typedef struct TY529153 TY529153;
typedef struct TY203018 TY203018;
typedef struct Tidtable292850 Tidtable292850;
typedef struct Tidpairseq292848 Tidpairseq292848;
typedef struct Tlinkedlist147013 Tlinkedlist147013;
typedef struct Tlistentry147007 Tlistentry147007;
typedef struct Tcproc529021 Tcproc529021;
typedef struct Tnodetable292862 Tnodetable292862;
typedef struct Tnodepairseq292860 Tnodepairseq292860;
typedef struct Debuginfo203009 Debuginfo203009;
typedef struct TY203021 TY203021;
typedef struct TY203023 TY203023;
typedef struct Tnodeseq292796 Tnodeseq292796;
typedef struct TY191350 TY191350;
typedef struct TY529095 TY529095;
typedef struct Trodreader332021 Trodreader332021;
typedef struct TY292960 TY292960;
typedef struct TY203017 TY203017;
typedef struct Enumdesc203007 Enumdesc203007;
typedef struct Tinfocc273008 Tinfocc273008;
typedef struct Tblock529019 Tblock529019;
typedef struct Ttraversalclosure537019 Ttraversalclosure537019;
typedef struct TY135002 TY135002;
typedef struct Tbitset339004 Tbitset339004;
typedef struct TY191612 TY191612;
typedef struct Tfileinfo191334 Tfileinfo191334;
typedef struct Tinfoos176035 Tinfoos176035;
typedef struct Tinfocpu176476 Tinfocpu176476;
typedef struct Tstrentry147009 Tstrentry147009;
typedef struct TY128506 TY128506;
typedef struct Basechunk29837 Basechunk29837;
typedef struct Freecell29829 Freecell29829;
typedef struct Tinstantiation292824 Tinstantiation292824;
typedef struct Tidpair292846 Tidpair292846;
typedef struct Tnodepair292858 Tnodepair292858;
typedef struct Filenamemapping203005 Filenamemapping203005;
typedef struct TY332033 TY332033;
typedef struct Tindex332019 Tindex332019;
typedef struct Tiitable299142 Tiitable299142;
typedef struct Tiipairseq299140 Tiipairseq299140;
typedef struct Table332054 Table332054;
typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057;
typedef struct Memfile330202 Memfile330202;
typedef struct TY292961 TY292961;
typedef struct Tiipair299138 Tiipair299138;
typedef struct Keyvaluepair332060 Keyvaluepair332060;
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
struct TNimType {
NI size;
Tnimkind3403 kind;
Tnimtypeflag3409Set flags;
TNimType* base;
TNimNode* node;
void* finalizer;
TY3489 marker;
TY3494 deepcopy;
};
typedef NU8 Tnimnodekind3405;
struct TNimNode {
Tnimnodekind3405 kind;
NI offset;
TNimType* typ;
NCSTRING name;
NI len;
TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc56202) (void);
struct TGenericSeq {
NI len;
NI reserved;
};
struct NimStringDesc {
TGenericSeq Sup;
NIM_CHAR data[SEQ_DECL_SIZE];
};
struct Cell47705 {
NI refcount;
TNimType* typ;
};
struct Cellseq47721 {
NI len;
NI cap;
Cell47705** d;
};
typedef Smallchunk29839* TY29900[512];
typedef Trunk29810* Trunkbuckets29812[256];
struct Intset29814 {
Trunkbuckets29812 data;
};
struct Memregion29885 {
NI minlargeobj;
NI maxlargeobj;
TY29900 freesmallchunks;
Llchunk29879* llmem;
NI currmem;
NI maxmem;
NI freemem;
NI lastsize;
Bigchunk29841* freechunkslist;
Intset29814 chunkstarts;
Avlnode29883* root;
Avlnode29883* deleted;
Avlnode29883* last;
Avlnode29883* freeavlnodes;
NIM_BOOL locked;
};
struct Gcstat50214 {
NI stackscans;
NI cyclecollections;
NI maxthreshold;
NI maxstacksize;
NI maxstackcells;
NI cycletablesize;
NI64 maxpause;
};
struct Cellset47717 {
NI counter;
NI max;
Pagedesc47713* head;
Pagedesc47713** data;
};
struct Gcheap50218 {
Gcstack50216* stack;
void* stackbottom;
NI cyclethreshold;
Cellseq47721 zct;
Cellseq47721 decstack;
Cellseq47721 tempstack;
NI recgclock;
Memregion29885 region;
Gcstat50214 stat;
Cellset47717 marked;
Cellseq47721 additionalroots;
};
struct Intset268030 {
NI counter;
NI max;
Trunk268026* head;
Trunkseq268028* data;
};
struct TNimObject {
TNimType* m_type;
};
struct Tidobj199004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind292435;
struct Tstrtable292806 {
NI counter;
Tsymseq292804* data;
};
typedef NU16 Tmagic292524;
struct Tlineinfo191336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag292184Set;
typedef NU32 Toption169009Set;
typedef NU8 Tlockind292808;
typedef NU8 Tstorageloc292812;
typedef NU16 Tlocflag292810Set;
struct Tloc292816 {
Tlockind292808 k;
Tstorageloc292812 s;
Tlocflag292810Set flags;
Ttype292840* t;
Ropeobj178006* r;
};
struct Tsym292834 {
Tidobj199004 Sup;
Tsymkind292435 kind;
union{
struct {Ttypeseq292836* typeinstcache;
} S1;
struct {TY292929* procinstcache;
Tsym292834* gcunsafetyreason;
} S2;
struct {TY292929* usedgenerics;
Tstrtable292806 tab;
} S3;
struct {Tsym292834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic292524 magic;
Ttype292840* typ;
Tident199010* name;
Tlineinfo191336 info;
Tsym292834* owner;
Tsymflag292184Set flags;
Tnode292802* ast;
Toption169009Set options;
NI position;
NI offset;
Tloc292816 loc;
Tlib292820* annex;
Tnode292802* constraint;
};
struct TY203018 {
NimStringDesc* Field0;
NI Field1;
};
struct Tpasscontext341002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
typedef Ropeobj178006* Tcfilesections529009[18];
typedef NU8 Codegenflag529025Set;
struct Tidtable292850 {
NI counter;
Tidpairseq292848* data;
};
struct Tlinkedlist147013 {
Tlistentry147007* head;
Tlistentry147007* tail;
NI counter;
};
struct Tnodetable292862 {
NI counter;
Tnodepairseq292860* data;
};
typedef Ropeobj178006* TY529136[10];
struct Tcgen529027 {
Tpasscontext341002 Sup;
Tcfilesections529009 s;
Codegenflag529025Set flags;
Tsym292834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj178006* tmpbase;
Tidtable292850 typecache;
Tidtable292850 forwtypecache;
Intset268030 declaredthings;
Intset268030 declaredprotos;
Tlinkedlist147013 headerfiles;
Intset268030 typeinfomarker;
Tcproc529021* initproc;
Tcproc529021* postinitproc;
Tcproc529021* preinitproc;
Ttypeseq292836* typestack;
Tnodetable292862 datacache;
Tsymseq292804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj178006* typenodesname;
Ropeobj178006* nimtypesname;
NI labels;
TY529136 extensionloaders;
Ropeobj178006* injectstmt;
};
struct Debuginfo203009 {
NI version;
TY203021* files;
TY203023* enums;
NIM_BOOL conflicts;
};
struct Tident199010 {
Tidobj199004 Sup;
NimStringDesc* s;
Tident199010* next;
NI h;
};
struct Tcproc529021 {
Tsym292834* prc;
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo191336 lastlineinfo;
Tnodeseq292796* nestedtrystmts;
NI inexceptblock;
TY191350* finallysafepoints;
NI labels;
TY529095* blocks;
NI breakidx;
Toption169009Set options;
NI maxframelen;
Tcgen529027* module;
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj178006* gcframetype;
};
typedef NU8 Tsymflag292184;
typedef NU8 Codegenflag529025;
typedef NU8 Toption169009;
typedef NU64 Tglobaloption169013Set;
typedef NU8 Tglobaloption169013;
typedef NU8 Tcommands169076;
typedef NU16 Tnodeflag292427Set;
typedef NU8 Tnodekind292020;
struct Tnode292802 {
Ttype292840* typ;
Tlineinfo191336 info;
Tnodeflag292427Set flags;
Tnodekind292020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym292834* sym;
} S4;
struct {Tident199010* ident;
} S5;
struct {Tnodeseq292796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj178006* TY533289[1];
typedef NU8 Tlocflag292810;
struct Tlistentry147007 {
TNimObject Sup;
Tlistentry147007* prev;
Tlistentry147007* next;
};
typedef NU8 Tlibkind292818;
struct Tlib292820 {
Tlistentry147007 Sup;
Tlibkind292818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj178006* name;
Tnode292802* path;
};
typedef NU8 Tcfilesection529005;
typedef NU8 Ttypekind292244;
typedef NU8 Tcallingconvention292002;
typedef NU32 Ttypeflag292431Set;
struct Ttype292840 {
Tidobj199004 Sup;
Ttypekind292244 kind;
Tcallingconvention292002 callconv;
Ttypeflag292431Set flags;
Ttypeseq292836* sons;
Tnode292802* n;
Tsym292834* owner;
Tsym292834* sym;
Tsym292834* destructor;
Tsym292834* deepcopy;
Tsym292834* assignment;
TY292960* methods;
NI64 size;
NI16 align;
NI16 locklevel;
Tloc292816 loc;
};
typedef Ropeobj178006* TY532811[2];
typedef NU8 Tctypekind529007;
typedef NU64 Ttypekind292244Set;
typedef NU8 Ttypeflag292431;
typedef NimStringDesc* TY533943[14];
typedef NU8 Tprefereddesc320011;
typedef Ropeobj178006* TY178507[1];
struct Enumdesc203007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY203017* values;
};
typedef Ropeobj178006* TY535235[4];
typedef NimStringDesc* TY292016[10];
typedef Ropeobj178006* TY535238[3];
struct Ropeobj178006 {
TNimObject Sup;
Ropeobj178006* left;
Ropeobj178006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop273004Set;
struct Tinfocc273008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop273004Set Field20;
};
typedef Tinfocc273008 TY273427[13];
typedef NU8 Tsystemcc273002;
typedef NU8 Tnodeflag292427;
typedef NU8 Tcprocsection529011;
typedef Ropeobj178006* Tcprocsections529013[3];
struct Tblock529019 {
NI id;
Ropeobj178006* label;
Tcprocsections529013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode169080;
typedef NU8 Ttypeinforeason537016;
struct Ttraversalclosure537019 {
Tcproc529021* p;
NimStringDesc* visitorfrmt;
};
typedef NU8 Ttypefieldresult320145;
typedef NU8 Tinfoccprop273004;
typedef Ropeobj178006* TY536847[6];
typedef Ropeobj178006* TY536401[7];
typedef Ropeobj178006* TY536475[5];
typedef NU16 Tmsgkind191002;
typedef NU8 Tassignmentflag538302Set;
typedef NU8 Tassignmentflag538302;
typedef NimStringDesc* TY552655[19];
typedef NimStringDesc* TY551642[3];
typedef NimStringDesc* TY556764[4];
typedef NimStringDesc* TY551828[42];
typedef NimStringDesc* TY551281[7];
typedef NU8 Trenderflag311004Set;
typedef NimStringDesc* TY557052[2];
typedef NU8 Tclosuretypekind535679;
typedef NimStringDesc* TY556428[6];
typedef NU8 Tanalysisresult473003;
typedef NU8 char136Set[32];
typedef NU8 Tdistinctcompare324427;
typedef NU8 Ttypecmpflag324429Set;
typedef NU16 Tspecialword275003;
typedef NU8 Tsystemos176004;
struct Tfileinfo191334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj178006* quotedname;
Ropeobj178006* quotedfullname;
TY191350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop176031Set;
struct Tinfoos176035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop176031Set Field12;
};
typedef Tinfoos176035 TY176082[24];
typedef NU8 Tendian176474;
struct Tinfocpu176476 {
NimStringDesc* Field0;
NI Field1;
Tendian176474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu176476 TY176510[19];
typedef NU8 Tsystemcpu176452;
struct Tstrentry147009 {
Tlistentry147007 Sup;
NimStringDesc* data;
};
struct TY128506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
struct Gcstack50216 {
Gcstack50216* prev;
Gcstack50216* next;
void* starts;
void* pos;
NI maxstacksize;
};
struct Basechunk29837 {
NI prevsize;
NI size;
NIM_BOOL used;
};
struct Smallchunk29839 {
Basechunk29837 Sup;
Smallchunk29839* next;
Smallchunk29839* prev;
Freecell29829* freelist;
NI free;
NI acc;
NF data;
};
struct Llchunk29879 {
NI size;
NI acc;
Llchunk29879* next;
};
struct Bigchunk29841 {
Basechunk29837 Sup;
Bigchunk29841* next;
Bigchunk29841* prev;
NI align;
NF data;
};
typedef NI TY29818[16];
struct Trunk29810 {
Trunk29810* next;
NI key;
TY29818 bits;
};
typedef Avlnode29883* TY29890[2];
struct Avlnode29883 {
TY29890 link;
NI key;
NI upperbound;
NI level;
};
struct Pagedesc47713 {
Pagedesc47713* next;
NI key;
TY29818 bits;
};
struct Trunk268026 {
Trunk268026* next;
NI key;
TY29818 bits;
};
struct Tidpair292846 {
Tidobj199004* key;
TNimObject* val;
};
struct Tnodepair292858 {
NI h;
Tnode292802* key;
NI val;
};
struct Filenamemapping203005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile332002;
struct Tiitable299142 {
NI counter;
Tiipairseq299140* data;
};
struct Tindex332019 {
NI lastidxkey;
NI lastidxval;
Tiitable299142 tab;
NimStringDesc* r;
NI offset;
};
struct Table332054 {
Keyvaluepairseq332057* data;
NI counter;
};
struct Memfile330202 {
void* mem;
NI size;
int handle;
};
struct Trodreader332021 {
TNimObject Sup;
NI pos;
NCSTRING s;
Toption169009Set options;
Treasonforrecompile332002 reason;
TY332033* moddeps;
TY332033* files;
NI dataidx;
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex332019 index;
Tindex332019 imports;
NI readerindex;
NI line;
NI moduleid;
Table332054 syms;
Memfile330202 memfile;
Tsymseq292804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
struct TY292961 {
NI Field0;
Tsym292834* Field1;
};
struct Freecell29829 {
Freecell29829* next;
NI zerofield;
};
struct Tinstantiation292824 {
Tsym292834* sym;
Ttypeseq292836* concretetypes;
NI compilesid;
};
struct Tiipair299138 {
NI key;
NI val;
};
struct Keyvaluepair332060 {
NI Field0;
NI Field1;
Tsym292834* Field2;
};
struct Ttypeseq292836 {
TGenericSeq Sup;
Ttype292840* data[SEQ_DECL_SIZE];
};
struct TY529153 {
TGenericSeq Sup;
Tcgen529027* data[SEQ_DECL_SIZE];
};
struct Tsymseq292804 {
TGenericSeq Sup;
Tsym292834* data[SEQ_DECL_SIZE];
};
struct TY203017 {
TGenericSeq Sup;
TY203018 data[SEQ_DECL_SIZE];
};
struct TY135002 {
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset339004 {
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY529095 {
TGenericSeq Sup;
Tblock529019 data[SEQ_DECL_SIZE];
};
struct TY191350 {
TGenericSeq Sup;
Ropeobj178006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq292796 {
TGenericSeq Sup;
Tnode292802* data[SEQ_DECL_SIZE];
};
struct TY191612 {
TGenericSeq Sup;
Tfileinfo191334 data[SEQ_DECL_SIZE];
};
struct Trunkseq268028 {
TGenericSeq Sup;
Trunk268026* data[SEQ_DECL_SIZE];
};
struct TY292929 {
TGenericSeq Sup;
Tinstantiation292824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq292848 {
TGenericSeq Sup;
Tidpair292846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq292860 {
TGenericSeq Sup;
Tnodepair292858 data[SEQ_DECL_SIZE];
};
struct TY203021 {
TGenericSeq Sup;
Filenamemapping203005 data[SEQ_DECL_SIZE];
};
struct TY203023 {
TGenericSeq Sup;
Enumdesc203007 data[SEQ_DECL_SIZE];
};
struct TY292960 {
TGenericSeq Sup;
TY292961 data[SEQ_DECL_SIZE];
};
struct TY332033 {
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq299140 {
TGenericSeq Sup;
Tiipair299138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq332057 {
TGenericSeq Sup;
Keyvaluepair332060 data[SEQ_DECL_SIZE];
};
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc56202 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47705*, usrtocell_51840_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_53001_1689653243)(Cell47705* c0);
N_NOINLINE(void, addzct_51817_1689653243)(Cellseq47721* s0, Cell47705* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
N_NIMCALL(Tcgen529027*, newmodule_563045_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0);
N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_196185_1689653243)(TY203018 x0);
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0);
N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0);
N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53819_1689653243)(Cell47705* c0);
static N_INLINE(void, decref_53401_1689653243)(Cell47705* c0);
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0);
N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0);
N_NIMCALL(Ropeobj178006*, gettempname_533596_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_192264_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_192260_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, getcfile_563204_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0);
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0);
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0);
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0);
N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0);
N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0);
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79610_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym292834*, getcompilerproc_338746_3937434831)(NimStringDesc* name0);
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0);
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0);
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0);
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0);
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533548_839829468)(Ttype292840* rettype0);
N_NIMCALL(Tctypekind529007, maptype_533393_839829468)(Ttype292840* typ0);
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0);
N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0);
N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_533476_839829468)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_533509_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533513_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescaux_533503_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0);
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj178006*, cachegettype_533591_839829468)(Tidtable292850 tab0, Ttype292840* key0);
N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0);
N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0);
N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0);
N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_533449_839829468)(Ttype292840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0);
N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0);
N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009 self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0);
N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86804_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0);
N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0);
N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0);
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533609_839829468)(Tsym292834* s0);
N_NIMCALL(Tctypekind529007, mapreturntype_533445_839829468)(Ttype292840* typ0);
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0);
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0);
N_NIMCALL(Tnode292802*, getbody_335227_1724185294)(Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, gettypedesc_535671_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0);
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0);
N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816 a0);
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0);
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468);
N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0);
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0);
N_NIMCALL(void, gentupleinfo_536549_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0);
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0);
N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0);
N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0);
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0);
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0);
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816 loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0);
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816 a0);
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816 a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0);
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468);
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0);
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0);
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0);
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0);
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0);
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_298007_3471544153)(NF f0);
N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0);
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genenuminfo_536597_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectinfo_536506_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0);
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0);
N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0);
N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0);
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0);
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0);
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0);
N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0);
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0);
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0);
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_559442_839829468)(Tlib292820* lib0);
N_NIMCALL(void, loaddynamiclib_559480_839829468)(Tcgen529027* m0, Tlib292820* lib0);
N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0);
N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0);
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816 s0);
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void);
N_NIMCALL(void, optasgnloc_549788_839829468)(Tloc292816 a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result);
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0);
N_NIMCALL(NI, asgncomplexity_549750_839829468)(Tnode292802* n0);
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0, Tnode292802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0);
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0);
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0);
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0);
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0);
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0);
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0);
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
static N_INLINE(NIM_BOOL, isemptytype_297440_850551059)(Ttype292840* t0);
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0);
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, sourceline_192068_155036129)(Tlineinfo191336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0);
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0);
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0);
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryfloatarith_556728_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816 a0, Tloc292816 b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816 a0);
N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0);
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0);
N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrequals_556666_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0);
N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj178006*, genofhelper_555139_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0);
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816 a0, Ropeobj178006* sizeexpr_554745_839829468);
N_NIMCALL(void, gennewfinalize_555110_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816 dest0, Ropeobj178006* length0);
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensomecast_556480_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, getclosuretype_535683_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535679 kind0);
N_NIMCALL(void, genord_556474_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, binarystmtinexcl_555857_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816 a0, Ttype292840* settype0);
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0);
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0);
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0);
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0);
N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0);
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0);
N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0);
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0);
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0);
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468);
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0);
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0);
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0);
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0);
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0);
N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0);
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Tsym292834*, lookupfieldagain_553153_839829468)(Tcproc529021* p0, Ttype292840* ty_553156_839829468, Tsym292834* field0, Ropeobj178006** r0);
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0);
N_NIMCALL(Tnode292802*, newstrnode_293678_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_556537_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genconv_556632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0);
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0);
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0);
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0);
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0);
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0);
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0);
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genrangechck_556590_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_556642_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, convcstrtostr_556654_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0);
static N_INLINE(NIM_BOOL, isroutine_297323_850551059)(Tsym292834* s0);
N_NIMCALL(void, genwhilestmt_545984_839829468)(Tcproc529021* p0, Tnode292802* t0);
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0);
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0);
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0);
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0);
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0);
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0);
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0);
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0);
N_NIMCALL(void, gencase_547826_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genstringcase_547416_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816 a0);
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0);
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0);
N_NIMCALL(void, genordinalcase_547724_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_547615_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547575_839829468)(Tnode292802* b0);
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0);
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816 a0, Tloc292816 tmp0, Ttype292840* objtype0, Tsym292834* field0);
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0);
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_547865_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0);
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0);
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0);
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0);
N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tsym292834*, skipgenericowner_297279_850551059)(Tsym292834* s0);
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0);
N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void);
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0);
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0);
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(Ropeobj178006*, gensectionend_530050_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0);
N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0);
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0);
N_NIMCALL(void, writeheader_563152_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result);
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0);
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY292016 Callingconvtostr_533585_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY273427 Cc_273413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
/* Generated template table: unary negation/abs forms
   ("((NI$2)-($1))", "-($1)", abs-via-ternary). Do not hand-edit. */
NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
/* Generated operator-symbol table ("+", "-", T.._53, "/") used when
   splicing binary operators into the $1 slot of T839829468_319. */
NIM_CONST TY556764 opr_556762_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
/* Binary-arithmetic codegen dispatch table (generated; do not hand-edit).
   Slots reference the arithmetic/shift/bitwise/min-max/comparison format
   templates T839829468_325..351 defined above; several comparison
   templates (_341.._343) are deliberately repeated for the signed,
   unsigned and per-width variants of the same operator. */
NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
/* Generated table of plain (unchecked) arithmetic cast templates
   "($#)($# op $#)" for + - * / %; the last two repeat + and -. */
NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
/* Generated table of overflow-checked 64-bit arithmetic call templates
   ("$# = #addInt64($#, $#);$n" etc.); parallels prc_551269 below. */
NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
/* Generated table of overflow-checked native-int arithmetic call
   templates ("$# = #addInt($#, $#);$n" etc.); 64-bit twin is prc64 above. */
NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
/* Generated inc/dec template pairs: unchecked ("$1 += $2;"/"$1 -= $2;"),
   checked 64-bit (#addInt64/#subInt64) and checked native-int
   (#addInt/#subInt) variants. */
NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
/* Generated set-operation templates: element-wise subset/compare loops
   (T.._476/_477) plus the "|", "& ~", "^" operator spellings spliced
   into the per-element loop template. */
NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
/* Generated 256-bit character-set constant (bits 0x88 at byte 4 and
   0x01 at byte 8 => characters 35, 39, 64 set -- '#', '\'', '@';
   presumably special characters recognized by the call codegen in
   ccgcalls; verify against the generator before relying on this). */
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
" HINSTANCE hPrevInstance, $N LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
" LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY176082 Os_176068_4151366050;
extern NIM_CONST TY176510 Cpu_176496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
Tcgen529027* generatedheader_532201_839829468;
extern TNimType NTI529015; /* BModule */
Ropeobj178006* indent_532655_839829468;
extern TNimType NTI178004; /* Rope */
extern Gcheap50218 gch_50258_1689653243;
Ropeobj178006* nimtv_538656_839829468;
Ttypeseq292836* nimtvdeps_538674_839829468;
extern TNimType NTI292836; /* TTypeSeq */
Intset268030 nimtvdeclared_538675_839829468;
extern TNimType NTI268030; /* IntSet */
NI breakpointid_548860_839829468;
Ropeobj178006* gbreakpoints_548861_839829468;
extern TY529153* gmodules_529170_3723162438;
extern TNimType NTI529027; /* TCGen */
extern Debuginfo203009 gdebuginfo_203470_1926258066;
extern Toption169009Set goptions_169128_2607990831;
extern TNimType NTI292804; /* TSymSeq */
extern Tglobaloption169013Set gglobaloptions_169130_2607990831;
extern NimStringDesc* headerfile_169138_2607990831;
extern NimStringDesc* gprojectfull_169211_2607990831;
extern Tcommands169076 gcmd_169132_2607990831;
extern NI gerrorcounter_192072_155036129;
extern Ropeobj178006* rnl_178903_2381377266;
extern NI gforwardedprocscounter_529171_3723162438;
extern TNimType NTI292244; /* TTypeKind */
extern TNimType NTI203017; /* seq[(string, int)] */
extern Tsystemcc273002 ccompiler_273431_2528170400;
extern NimStringDesc* tnl_176644_4151366050;
extern NI floatsize_176642_4151366050;
extern Tgcmode169080 gselectedgc_169133_2607990831;
extern TNimType NTI292020; /* TNodeKind */
extern TNimType NTI135002; /* seq[string] */
extern TNimType NTI292435; /* TSymKind */
extern TNimType NTI292816; /* TLoc */
extern NI intsize_176641_4151366050;
extern TNimType NTI292524; /* TMagic */
extern TNimType NTI191350; /* seq[Rope] */
extern TNimType NTI292796; /* TNodeSeq */
extern Ropeobj178006* mainmodprocs_529148_3723162438;
extern Ropeobj178006* maindatinit_529151_3723162438;
extern Ropeobj178006* mainmodinit_529149_3723162438;
extern Ropeobj178006* othermodsinit_529150_3723162438;
extern Tsystemos176004 targetos_176629_4151366050;
extern TY191612* fileinfos_191629_155036129;
extern Tsystemcpu176452 targetcpu_176627_4151366050;
extern Ropeobj178006* gmapping_529152_3723162438;
/* GC marker thunk: lets the Nim GC trace the module-level global
   `generatedheader` rope. Second nimGCvisit argument is 0 -- meaning
   not visible from here; confirm against the Nim runtime. */
N_NIMCALL(void, T839829468_2)(void) {
nimGCvisit((void*)generatedheader_532201_839829468, 0);
}
/* GC marker thunk for the module-level `indent` rope global. */
N_NIMCALL(void, T839829468_3)(void) {
nimGCvisit((void*)indent_532655_839829468, 0);
}
static N_INLINE(Cell47705*, usrtocell_51840_1689653243)(void* usr0) {
/* Map a user-data pointer back to its GC cell header: the Cell47705
   header is laid out immediately before the user payload, so step
   back by sizeof(Cell47705) (32-bit pointer arithmetic via NU32). */
NU32 addr0 = (NU32)((NI) usr0);
NU32 hdr0 = addr0 - (NU32)((NI) sizeof(Cell47705));
return (Cell47705*) ((NI) hdr0);
}
/* Append cell c0 to the global GC heap's zct -- presumably the
   "zero count table" of cells pending collection; confirm against
   the Nim GC sources. */
static N_INLINE(void, rtladdzct_53001_1689653243)(Cell47705* c0) {
addzct_51817_1689653243((&gch_50258_1689653243.zct), c0);
}
/* Reference assignment on the non-cycle-collector GC path:
   retain src0, release the old *dest0 (queuing its cell on the ZCT
   when its count drops below 8), then store src0 into *dest0.
   Refcounts move in steps of 8 -- the low bits appear to be
   reserved for GC flags; TODO confirm against the Nim GC. */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
{
Cell47705* c0;
/* nothing to retain when assigning nil */
if (!!((src0 == NIM_NIL))) goto LA3;
c0 = usrtocell_51840_1689653243(src0);
(*c0).refcount += ((NI) 8);
}
LA3: ;
{
Cell47705* c0;
/* release the previously stored reference, if any */
if (!!(((*dest0) == NIM_NIL))) goto LA7;
c0 = usrtocell_51840_1689653243((*dest0));
{
(*c0).refcount -= ((NI) 8);
/* count fell below one full reference: hand to the ZCT */
if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA11;
rtladdzct_53001_1689653243(c0);
}
LA11: ;
}
LA7: ;
(*dest0) = src0;
}
/* GC marker thunk for the `nimtv` thread-var rope global. */
N_NIMCALL(void, T839829468_5)(void) {
nimGCvisit((void*)nimtv_538656_839829468, 0);
}
/* GC marker thunk for the `nimtvdeps` type-sequence global. */
N_NIMCALL(void, T839829468_6)(void) {
nimGCvisit((void*)nimtvdeps_538674_839829468, 0);
}
/* Drop one (acyclic) reference to `p0`: decrement its cell's refcount by one
   owner-step (8) and queue the cell on the ZCT if the count drops below 8. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
Cell47705* c0;
c0 = usrtocell_51840_1689653243(p0);
{
(*c0).refcount -= ((NI) 8);
if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3;
rtladdzct_53001_1689653243(c0);
}
LA3: ;
}
/* Generated GC root marker: traces both heap members (`head`, `data`) of the
   module-level `nimtvdeclared` table. */
N_NIMCALL(void, T839829468_7)(void) {
nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0);
nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0);
}
/* Generated GC root marker for the module-level `gbreakpoints` global. */
N_NIMCALL(void, T839829468_8)(void) {
nimGCvisit((void*)gbreakpoints_548861_839829468, 0);
}
/* Look up the code-generator state for module symbol `s0`: returns
   gmodules[s0.position] when the position is a valid index, else nil.
   The `&&` over the bounds check is generated as short-circuiting gotos. */
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (((NI) 0) <= (*s0).position);
if (!(LOC3)) goto LA4;
LOC3 = ((*s0).position < (gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = gmodules_529170_3723162438->data[(*s0).position];
}
goto LA1;
LA5: ;
{
result0 = NIM_NIL;
}
LA1: ;
return result0;
}
/* Thin memcpy wrapper; the memcpy return value is deliberately discarded. */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
void* LOC1;
LOC1 = (void*)0;
LOC1 = memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append `src0` (including its NUL terminator, hence the +1 on the copy
   size) to `dest0` and extend its length. Assumes the caller has already
   reserved enough capacity (e.g. via rawNewString) — no bounds check here. */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1)))));
(*dest0).Sup.len += (*src0).Sup.len;
}
/* Produce a 32-bit hash identifying the module that owns symbol `s0`:
   walk the owner chain until a symbol of kind 6 is reached (presumably the
   enclosing module symbol — TODO confirm against TSymKind), then register
   the (owner-of-module name, module name) pair with the debug-info table,
   which returns the hash. */
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) {
NU32 result0;
Tsym292834* m0;
Tsym292834* p0;
result0 = (NU32)0;
m0 = s0;
{
while (1) {
if (!!(((*m0).kind == ((Tsymkind292435) 6)))) goto LA2;
m0 = (*m0).owner;
} LA2: ;
}
p0 = (*m0).owner;
result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*p0).name).s, (*(*m0).name).s);
return result0;
}
/* Increment a cell's refcount by one owner-step (counts advance by 8). */
static N_INLINE(void, incref_53819_1689653243)(Cell47705* c0) {
(*c0).refcount = (NI)((NU32)((*c0).refcount) + (NU32)(((NI) 8)));
}
/* Decrement a cell's refcount by one owner-step; queue it on the ZCT when
   the count drops below 8 (i.e. no owners may remain). */
static N_INLINE(void, decref_53401_1689653243)(Cell47705* c0) {
{
(*c0).refcount -= ((NI) 8);
if (!((NU32)((*c0).refcount) < (NU32)(((NI) 8)))) goto LA3;
rtladdzct_53001_1689653243(c0);
}
LA3: ;
}
/* General reference-counted pointer assignment: incref the new target,
   decref the old one, then store. Same net effect as asgnRefNoCycle above,
   expressed through the incref/decref helpers. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
{
Cell47705* LOC5;
if (!!((src0 == NIM_NIL))) goto LA3;
LOC5 = (Cell47705*)0;
LOC5 = usrtocell_51840_1689653243(src0);
incref_53819_1689653243(LOC5);
}
LA3: ;
{
Cell47705* LOC10;
if (!!(((*dest0) == NIM_NIL))) goto LA8;
LOC10 = (Cell47705*)0;
LOC10 = usrtocell_51840_1689653243((*dest0));
decref_53401_1689653243(LOC10);
}
LA8: ;
(*dest0) = src0;
}
/* Compute the option set for a module's init proc. When the module symbol
   carries flag bit 13 (a system/compiler-module marker — TODO confirm
   against TSymFlag), option bit 15 (mask 32768) is stripped from the global
   options; otherwise the global options are used unchanged. */
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) {
Toption169009Set result0;
memset((void*)(&result0), 0, sizeof(result0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
result0 = (goptions_169128_2607990831 & ~ 32768);
}
goto LA1;
LA3: ;
{
result0 = goptions_169128_2607990831;
}
LA1: ;
return result0;
}
/* Create the module's pre-init proc. Its label counter starts at 100000 so
   temp names don't collide with those of the regular init proc. */
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) {
Tcproc529021* result0;
result0 = (Tcproc529021*)0;
result0 = newproc_529206_3723162438(NIM_NIL, m0);
(*result0).labels = ((NI) 100000);
return result0;
}
/* Create the module's post-init proc; label counter starts at 200000 to
   keep its temp names disjoint from the init (0) and pre-init (100000)
   procs. */
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) {
Tcproc529021* result0;
result0 = (Tcproc529021*)0;
result0 = newproc_529206_3723162438(NIM_NIL, m0);
(*result0).labels = ((NI) 200000);
return result0;
}
/* Produce a fresh unique temporary name for module `m0`: the module's
   `tmpbase` rope concatenated with the current label counter, which is then
   incremented. */
N_NIMCALL(Ropeobj178006*, gettempname_533596_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*m0).labels)));
result0 = HEX26_178418_2381377266((*m0).tmpbase, LOC1);
(*m0).labels += ((NI) 1);
return result0;
}
/* Allocate and initialize a fresh code-generator module (Tcgen) for
   `module0` writing to `filename0`: build the temp-name base from the
   owner hash, initialize the header-file list, declared/proto/typeinfo
   sets, type caches and data cache, create the init/pre-init/post-init
   procs, and seed empty typestack/forwardedprocs seqs. If the module
   carries flag bit 13 (system-module marker — TODO confirm), a codegen
   flag is set and option bit 15 is cleared on the pre/post init procs. */
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) {
Tcgen529027* result0;
NimStringDesc* LOC1;
NU32 LOC2;
NimStringDesc* LOC3;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
result0 = (Tcgen529027*)0;
result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027));
(*result0).Sup.Sup.m_type = (&NTI529027);
/* tmpbase := prefix & $hashOwner(module) & suffix */
LOC1 = (NimStringDesc*)0;
LOC2 = (NU32)0;
LOC2 = hashowner_532977_839829468(module0);
LOC3 = (NimStringDesc*)0;
LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
LOC1 = rawNewString(LOC3->Sup.len + 2);
appendString(LOC1, ((NimStringDesc*) &T839829468_11));
appendString(LOC1, LOC3);
appendString(LOC1, ((NimStringDesc*) &T839829468_12));
asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1));
initlinkedlist_147031_3771138726((&(*result0).headerfiles));
initintset_268885_2627731572((&(*result0).declaredthings));
initintset_268885_2627731572((&(*result0).declaredprotos));
/* copyStringRC1 assigns with manual unref of the previous value */
LOC4 = (NimStringDesc*)0;
LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
if (LOC4) nimGCunrefNoCycle(LOC4);
LOC5 = (NimStringDesc*)0;
LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
if (LOC5) nimGCunrefNoCycle(LOC5);
initidtable_296019_850551059((&(*result0).typecache));
initidtable_296019_850551059((&(*result0).forwtypecache));
asgnRefNoCycle((void**) (&(*result0).module), module0);
initintset_268885_2627731572((&(*result0).typeinfomarker));
asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0));
(*(*result0).initproc).options = initprocoptions_562635_839829468(result0);
asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0));
asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0));
initnodetable_296085_850551059((&(*result0).datacache));
(*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*result0).forwardedprocs) nimGCunrefNoCycle((*result0).forwardedprocs);
(*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533596_839829468(result0));
asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533596_839829468(result0));
{
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8;
(*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
}
LA8: ;
return result0;
}
/* Convenience overload: derive the module's output filename from its file
   index (`position`) via toFullPath, then delegate to the full
   rawnewmodule. */
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) {
Tcgen529027* result0;
NimStringDesc* LOC1;
result0 = (Tcgen529027*)0;
LOC1 = (NimStringDesc*)0;
LOC1 = tofullpath_192264_155036129(((NI32) ((*module0).position)));
result0 = rawnewmodule_562663_839829468(module0, LOC1);
return result0;
}
/* Create and register the codegen module for `module0`. Raises an internal
   error if a codegen module already exists for it, grows the global
   `gmodules` seq as needed, and stores the new module at index
   `module0.position`. Under a certain global option (bit 2 — symbol files,
   presumably) it also internal-errors when the module carries flag bit 25. */
N_NIMCALL(Tcgen529027*, newmodule_563045_839829468)(Tsym292834* module0) {
Tcgen529027* result0;
result0 = (Tcgen529027*)0;
{
Tcgen529027* LOC3;
NimStringDesc* LOC6;
LOC3 = (Tcgen529027*)0;
LOC3 = getcgenmodule_532226_839829468(module0);
if (!!((LOC3 == NIM_NIL))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_9);
internalerror_196113_155036129(LOC6);
}
LA4: ;
result0 = rawnewmodule_563038_839829468(module0);
{
/* grow gmodules so that `position` is a valid index */
if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
}
LA9: ;
asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13;
{
NimStringDesc* LOC19;
NimStringDesc* LOC20;
if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17;
LOC19 = (NimStringDesc*)0;
LOC20 = (NimStringDesc*)0;
LOC20 = tofilename_192260_155036129(((NI32) ((*module0).position)));
LOC19 = rawNewString(LOC20->Sup.len + 28);
appendString(LOC19, ((NimStringDesc*) &T839829468_13));
appendString(LOC19, LOC20);
internalerror_196113_155036129(LOC19);
}
LA17: ;
}
LA13: ;
return result0;
}
/* Compiler-pass "open" hook: create the codegen module for `module0` and
   return it as the pass context. If header generation is enabled (global
   option bit 27) and no header module exists yet, create one writing to
   either the configured header file or the project file, with its
   extension changed (T839829468_14, presumably ".h" — TODO confirm), and
   mark it with codegen flag 3. */
N_NIMCALL(Tpasscontext341002*, myopen_563115_839829468)(Tsym292834* module0) {
Tpasscontext341002* result0;
Tcgen529027* LOC1;
result0 = (Tpasscontext341002*)0;
LOC1 = (Tcgen529027*)0;
LOC1 = newmodule_563045_839829468(module0);
result0 = &LOC1->Sup;
{
NIM_BOOL LOC4;
NimStringDesc* f0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
LOC4 = (NIM_BOOL)0;
LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = (generatedheader_532201_839829468 == NIM_NIL);
LA5: ;
if (!LOC4) goto LA6;
{
/* pick explicit --header file when set, else the project file */
if (!(((NI) 0) < (headerfile_169138_2607990831 ? headerfile_169138_2607990831->Sup.len : 0))) goto LA10;
f0 = headerfile_169138_2607990831;
}
goto LA8;
LA10: ;
{
f0 = gprojectfull_169211_2607990831;
}
LA8: ;
LOC13 = (NimStringDesc*)0;
LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE);
LOC14 = (NimStringDesc*)0;
LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14));
(*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8));
}
LA6: ;
return result0;
}
/* Compute the output C-family filename for module `m0`: choose an
   extension based on the active backend command (gcmd == 2 or module flag
   27 → one extension, presumably C++; gcmd == 3 or flag 28 → another,
   presumably Objective-C; otherwise plain C — TODO confirm the literal
   contents of T839829468_15/16/17), then prepend the package name and
   resolve into the nimcache directory. */
N_NIMCALL(NimStringDesc*, getcfile_563204_839829468)(Tcgen529027* m0) {
NimStringDesc* result0;
NimStringDesc* ext0;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
result0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
ext0 = copyString(((NimStringDesc*) &T839829468_15));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (gcmd_169132_2607990831 == ((Tcommands169076) 3));
if (LOC8) goto LA9;
LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA9: ;
if (!LOC8) goto LA10;
ext0 = copyString(((NimStringDesc*) &T839829468_16));
}
goto LA1;
LA10: ;
{
ext0 = copyString(((NimStringDesc*) &T839829468_17));
}
LA1: ;
LOC13 = (NimStringDesc*)0;
LOC13 = withpackagename_170073_2607990831((*m0).cfilename);
LOC14 = (NimStringDesc*)0;
LOC14 = completecfilepath_273854_2528170400(LOC13, NIM_TRUE);
result0 = noschangeFileExt(LOC14, ext0);
return result0;
}
/* Compiler-pass "open cached" hook: create the codegen module and load the
   previously generated C file's merge information so incremental output can
   be stitched together. The rod-reader parameter `rd0` is unused here. */
N_NIMCALL(Tpasscontext341002*, myopencached_563249_839829468)(Tsym292834* module0, Trodreader332021* rd0) {
Tpasscontext341002* result0;
Tcgen529027* m0;
NimStringDesc* LOC1;
result0 = (Tpasscontext341002*)0;
m0 = newmodule_563045_839829468(module0);
LOC1 = (NimStringDesc*)0;
LOC1 = getcfile_563204_839829468(m0);
readmergeinfo_530613_2760143328(LOC1, m0);
result0 = &m0->Sup;
return result0;
}
/* True when code generation should be skipped: any compile errors have
   been reported. The node parameter `n0` is unused. */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((NI) 0) < gerrorcounter_192072_155036129);
return result0;
}
/* Initialize a location descriptor, but only if it is still unallocated
   (kind == 0); the name rope `r0` is likewise only set when none exists
   yet, so earlier assignments always win. */
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) {
{
if (!((*a0).k == ((Tlockind292808) 0))) goto LA3;
(*a0).k = k0;
unsureAsgnRef((void**) (&(*a0).t), typ0);
(*a0).s = s0;
{
if (!((*a0).r == NIM_NIL)) goto LA7;
unsureAsgnRef((void**) (&(*a0).r), r0);
}
LA7: ;
}
LA3: ;
}
/* True when identifier `w0` would clash with a reserved word in the C
   target, judged by its interned identifier id falling in known keyword
   ranges. Uses the GCC/Clang case-range extension. */
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
switch ((*w0).Sup.id) {
case ((NI) 200) ... ((NI) 262):
case ((NI) 4) ... ((NI) 70):
case ((NI) 138):
{
result0 = NIM_TRUE;
goto BeforeRet;
}
break;
default:
{
result0 = NIM_FALSE;
goto BeforeRet;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Compute (and cache in s.loc.r) the C identifier for symbol `s0`. The
   mangled base name may keep the original spelling — when the symbol kind
   is in bitmask 2824, none of the flags in mask 2149580812 are set, and the
   name is not a C keyword — in which case only a short suffix
   (T839829468_18) is appended. Otherwise the name is made unique by
   appending "_<symbol id>_<owner-module hash>". */
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*s0).loc.r;
{
NIM_BOOL keeporigname0;
NIM_BOOL LOC5;
NIM_BOOL LOC6;
NIM_BOOL LOC9;
NimStringDesc* LOC10;
/* only compute once; cached result short-circuits everything below */
if (!(result0 == NIM_NIL)) goto LA3;
LOC5 = (NIM_BOOL)0;
LOC6 = (NIM_BOOL)0;
LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
if (!(LOC6)) goto LA7;
LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
LA7: ;
LOC5 = LOC6;
if (!(LOC5)) goto LA8;
LOC9 = (NIM_BOOL)0;
LOC9 = iskeyword_532960_839829468((*s0).name);
LOC5 = !(LOC9);
LA8: ;
keeporigname0 = LOC5;
LOC10 = (NimStringDesc*)0;
LOC10 = mangle_528847_2036603609((*(*s0).name).s);
result0 = rope_178277_2381377266(LOC10);
{
if (!keeporigname0) goto LA13;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
}
goto LA11;
LA13: ;
{
/* uniquify: name & sep & id & sep & hashOwner(s) */
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
TY533289 LOC19;
Ropeobj178006* LOC20;
NU32 LOC21;
Ropeobj178006* LOC22;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
add_178482_2381377266(&result0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id)));
add_178482_2381377266(&result0, LOC18);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ropeobj178006*)0;
LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
add_178482_2381377266(&result0, LOC20);
LOC21 = (NU32)0;
LOC21 = hashowner_532977_839829468(s0);
LOC22 = (Ropeobj178006*)0;
LOC22 = rope_178401_2381377266(((NI64) (LOC21)));
add_178482_2381377266(&result0, LOC22);
}
LA11: ;
asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
}
LA3: ;
return result0;
}
/* Lazily initialize a proc symbol's location: if still unallocated, fill it
   with loc kind 7 (proc) and storage class 2, using the mangled name. */
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) {
{
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, LOC5, ((Tstorageloc292812) 2));
}
LA3: ;
}
/* If the symbol's location carries flag bit 6 (a header-import marker —
   TODO confirm against TLocFlag), add the header path from its annex to the
   module's header-file list. The includestr result is discarded. */
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{
NimStringDesc* LOC5;
NIM_BOOL LOC6;
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = getstr_297230_850551059((*(*sym0).annex).path);
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5);
}
LA3: ;
}
/* Append a single char plus a fresh NUL terminator to `dest0`; like
   appendString, assumes the caller pre-reserved the capacity. */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
(*dest0).data[((*dest0).Sup.len)- 0] = c0;
(*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0;
(*dest0).Sup.len += ((NI) 1);
}
/* A proc symbol counts as "activated" once it has a resolved type. */
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = !(((*prc0).typ == NIM_NIL));
return result0;
}
/* Append `prc0` to the module's forwarded-procs seq (grow, store at the old
   length, bump length) and increment the global forwarded-procs counter. */
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
(*m0).forwardedprocs = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*));
asgnRefNoCycle((void**) (&(*m0).forwardedprocs->data[(*m0).forwardedprocs->Sup.len]), prc0);
++(*m0).forwardedprocs->Sup.len;
gforwardedprocscounter_529171_3723162438 += ((NI) 1);
}
/* Emit a C line directive (formatted via template T839829468_21, presumably
   "#line") into rope `r0` when option bit 10 is enabled (line-directive
   option — TODO confirm). The filename is escaped to a single-line C
   string first. */
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) {
{
TY532811 LOC5;
NimStringDesc* LOC6;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NimStringDesc*)0;
LOC6 = makesinglelinecstring_528835_2036603609(filename0);
LOC5[0] = rope_178277_2381377266(LOC6);
LOC5[1] = rope_178401_2381377266(((NI64) (line0)));
addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2);
}
LA3: ;
}
/* Extract the line number from a TLineInfo record, widened to NI. */
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) {
NI result0;
result0 = (NI)0;
result0 = ((NI) (info0.line));
return result0;
}
/* Line number from `info0`, clamped so negative values become 0 (keeps
   emitted line directives valid). */
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) {
NI result0;
result0 = (NI)0;
result0 = tolinenumber_192415_155036129(info0);
{
if (!(result0 < ((NI) 0))) goto LA3;
result0 = ((NI) 0);
}
LA3: ;
return result0;
}
/* Overload taking a TLineInfo: resolve the full path from the file index,
   clamp the line number, and delegate to the string/line variant above. */
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) {
NimStringDesc* LOC1;
NI LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = tofullpath_192264_155036129(info0.fileindex);
LOC2 = (NI)0;
LOC2 = safelinenm_532721_839829468(info0);
genclinedir_532725_839829468(r0, LOC1, LOC2);
}
/* Map a Nim `set` type to a C type kind by its byte size: sizes 1/2/4/8 map
   to increasingly wide integer-like kinds (4..7); any other size falls back
   to kind 17 (an aggregate/array representation — TODO confirm against
   TCTypeKind). */
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
NI64 LOC1;
result0 = (Tctypekind529007)0;
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC1))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 4);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 5);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
result0 = ((Tctypekind529007) 17);
}
break;
}
return result0;
}
/* Recursively map a Nim type (by TTypeKind ordinal) to the C type-kind
   category used by the code generator. The numeric kind values correspond
   to Nim TTypeKind / TCTypeKind enum ordinals; names are not visible in
   this generated code, so the groupings below are annotated only where the
   structure makes them unambiguous. Unknown kinds raise an internal error. */
N_NIMCALL(Tctypekind529007, maptype_533393_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
result0 = (Tctypekind529007)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 0):
case ((Ttypekind292244) 7):
{
result0 = ((Tctypekind529007) 0);
}
break;
case ((Ttypekind292244) 1):
{
result0 = ((Tctypekind529007) 2);
}
break;
case ((Ttypekind292244) 2):
{
result0 = ((Tctypekind529007) 1);
}
break;
case ((Ttypekind292244) 19):
{
result0 = mapsettype_533389_839829468(typ0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 17);
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
result0 = ((Tctypekind529007) 19);
}
break;
/* transparent wrapper kinds: recurse into the last son */
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 12):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = lastson_295377_850551059(typ0);
result0 = maptype_533393_839829468(LOC8);
}
break;
/* enum-like kind 14: signed representation if first ordinal < 0,
   otherwise width chosen from the type's byte size */
case ((Ttypekind292244) 14):
{
{
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_320001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind529007) 6);
}
goto LA10;
LA13: ;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind292244) 20):
{
result0 = maptype_533393_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
/* pointer-ish kinds 21/22/23: pointer-to-aggregate when the pointee
   (after skipping wrapper kinds) is array/struct-like */
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
case ((Ttypekind292244) 22):
{
Ttype292840* base0;
Ttype292840* LOC24;
LOC24 = (Ttype292840*)0;
LOC24 = lastson_295377_850551059(typ0);
base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 18);
}
break;
default:
{
result0 = ((Tctypekind529007) 20);
}
break;
}
}
break;
case ((Ttypekind292244) 26):
{
result0 = ((Tctypekind529007) 20);
}
break;
case ((Ttypekind292244) 24):
{
result0 = ((Tctypekind529007) 22);
}
break;
/* proc type kind 25: plain function pointer unless calling convention
   8 (a closure, presumably), which maps to the aggregate kind 19 */
case ((Ttypekind292244) 25):
{
{
if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32;
result0 = ((Tctypekind529007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind529007) 19);
}
LA30: ;
}
break;
case ((Ttypekind292244) 28):
{
result0 = ((Tctypekind529007) 21);
}
break;
case ((Ttypekind292244) 29):
{
result0 = ((Tctypekind529007) 24);
}
break;
/* numeric kinds 31..44 map linearly onto ctype kinds 3..16 */
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
/* kind 59: resolvable only when the type has an attached node */
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype292840*)0;
LOC43 = lastson_295377_850551059(typ0);
result0 = maptype_533393_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
/* True when the type has a symbol carrying flag bit 27 (an importcpp-style
   marker, judging by the name — TODO confirm against TSymFlag). */
N_NIMCALL(NIM_BOOL, isimportedcpptype_533476_839829468)(Ttype292840* t0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = !(((*t0).sym == NIM_NIL));
if (!(LOC1)) goto LA2;
LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
/* A type needs a deep/complex assignment when it (transitively) contains a
   garbage-collected reference. */
N_NIMCALL(NIM_BOOL, needscomplexassignment_533509_839829468)(Ttype292840* typ0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = containsgarbagecollectedref_320117_3876443242(typ0);
return result0;
}
/* True for an object type (kind 17) that has no runtime type field: either
   it is final (type flag bit 2 set) with no inherited base (sons[0] == nil),
   or it is a pure object. */
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533513_839829468)(Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC3;
NIM_BOOL LOC4;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((*typ0).kind == ((Ttypekind292244) 17));
if (!(LOC1)) goto LA2;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL);
LA5: ;
LOC3 = LOC4;
if (LOC3) goto LA6;
LOC3 = ispureobject_320138_3876443242(typ0);
LA6: ;
LOC1 = LOC3;
LA2: ;
result0 = LOC1;
return result0;
}
/* Decide whether `rettype0` cannot be returned by value from a generated C
   function (and must instead become an out-parameter). True for: nil
   (void); array-like mappings (ctype 17) unless the skipped type is a
   pointer kind; and struct-like mappings (ctype 19) that either need a
   complex assignment or are objects with a runtime type field — except
   imported C++ types, which are always returnable. */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533548_839829468)(Ttype292840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind529007 LOC6;
LOC6 = (Tctypekind529007)0;
LOC6 = maptype_533393_839829468(rettype0);
switch (LOC6) {
case ((Tctypekind529007) 17):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
result0 = !(((*LOC8).kind == ((Ttypekind292244) 23) || (*LOC8).kind == ((Ttypekind292244) 22) || (*LOC8).kind == ((Ttypekind292244) 21)));
}
break;
case ((Tctypekind529007) 19):
{
Ttype292840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_533476_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_533476_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_533509_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_533513_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Base rope for a type's generated name: the mangled symbol name when the
   type has one, otherwise a fixed placeholder format (T839829468_28). */
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NimStringDesc* LOC5;
if (!!(((*typ0).sym == NIM_NIL))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = mangle_528847_2036603609((*(*(*typ0).sym).name).s);
result0 = rope_178277_2381377266(LOC5);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), LOC7, 0);
}
LA1: ;
return result0;
}
/* Resolve (and cache) the C identifier for a type. If the type's symbol has
   any flag in mask 96 set (imported markers, presumably), the symbol's own
   loc name is used verbatim; otherwise a unique name "<base><type id>" is
   built once and cached in typ.loc.r. A nil result is an internal error. */
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj178006*)0;
LOC12 = typename_533292_839829468(typ0);
LOC13 = (Ropeobj178006*)0;
LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC18);
}
LA16: ;
return result0;
}
/* For a non-magic type whose symbol carries flag bit 5 (imported,
   presumably — TODO confirm), use its generated type name; otherwise emit
   the supplied C literal (e.g. a builtin C type spelling). */
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = !(((*t0).sym == NIM_NIL));
if (!(LOC4)) goto LA5;
LOC4 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA6;
LOC3 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA6: ;
if (!LOC3) goto LA7;
result0 = gettypename_533313_839829468(t0);
}
goto LA1;
LA7: ;
{
result0 = rope_178277_2381377266(literal0);
}
LA1: ;
return result0;
}
/* Return the C type descriptor rope for "simple" types that need no struct
   definition: builtin scalars map to fixed literals (numeric kinds 31..44
   via the Numericaltypetostr table), wrapper kinds recurse into a son, and
   anything non-simple yields nil so the caller falls back to the full type
   descriptor machinery. Kind 28 additionally pulls in a runtime symbol via
   cgsym (result discarded, the call registers the dependency). */
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 26):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
case ((Ttypekind292244) 28):
{
Ropeobj178006* LOC3;
LOC3 = (Ropeobj178006*)0;
LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind292244) 29):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind292244) 1):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind292244) 2):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind292244) 5):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]);
}
break;
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 15):
{
result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype292840*)0;
LOC15 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind292244) 11):
{
Ttype292840* LOC18;
LOC18 = (Ttype292840*)0;
LOC18 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC18);
}
break;
default:
{
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Look up a type's cached descriptor rope in an id table; nil on miss.
   The id-table stores TNimObject*, hence the downcast on return. */
N_NIMCALL(Ropeobj178006*, cachegettype_533591_839829468)(Tidtable292850 tab0, Ttype292840* key0) {
Ropeobj178006* result0;
Tidobj199004* LOC1;
TNimObject* LOC2;
result0 = (Ropeobj178006*)0;
LOC1 = (Tidobj199004*)0;
LOC1 = &key0->Sup;
LOC2 = (TNimObject*)0;
LOC2 = idtableget_299086_2984716966(tab0, LOC1);
result0 = ((Ropeobj178006*) (LOC2));
return result0;
}
/* First-stage type descriptor lookup: nil type maps to a fixed literal
   (T839829468_26, presumably "void"); otherwise try the simple-type path,
   falling back to the module's type cache. May still return nil. */
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(typ0 == NIM_NIL)) goto LA3;
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_26));
}
goto LA1;
LA3: ;
{
result0 = getsimpletypedesc_533936_839829468(m0, typ0);
{
if (!(result0 == NIM_NIL)) goto LA8;
result0 = cachegettype_533591_839829468((*m0).typecache, typ0);
}
LA8: ;
}
LA1: ;
return result0;
}
/* True when the type has a symbol carrying flag bit 5 (imported marker,
   judging by the name — TODO confirm against TSymFlag). */
N_NIMCALL(NIM_BOOL, isimportedtype_533449_839829468)(Ttype292840* t0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = !(((*t0).sym == NIM_NIL));
if (!(LOC1)) goto LA2;
LOC1 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
/* Select the format string for a forward struct declaration: one variant
   for the C++ backend (gcmd == 2 or module flag 27), another for plain C
   (contents of T839829468_54/55 not visible here). */
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) {
NimStringDesc* result0;
result0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
result0 = copyString(((NimStringDesc*) &T839829468_54));
}
goto LA1;
LA5: ;
{
result0 = copyString(((NimStringDesc*) &T839829468_55));
}
LA1: ;
return result0;
}
/* Pick the C aggregate keyword rope for a type: one literal when type flag
   bit 1 is set (a union marker, presumably), the other otherwise
   (T839829468_56/57 — contents not visible here). */
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 1))&31U)))!=0)) goto LA3;
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_56));
}
goto LA1;
LA3: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_57));
}
LA1: ;
return result0;
}
/* Get a forward-declarable name for `typ0`. Order: forward-type cache hit,
   then the simple/cached descriptor; otherwise, for object/tuple/seq-like
   kinds (24, 18, 17), emit a forward struct declaration into the module's
   forward section (unless the type is imported) and record the name in the
   forward-type cache. Any other kind is an internal error. */
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
{ result0 = (Ropeobj178006*)0;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_533972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
{
Tidobj199004* LOC17;
TNimObject* LOC18;
result0 = gettypename_533313_839829468(typ0);
{
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY532811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_533449_839829468(typ0);
if (!!(LOC12)) goto LA13;
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_534001_839829468(typ0);
LOC16[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
LOC17 = (Tidobj199004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244)));
appendChar(LOC20, 41);
internalerror_196113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
/* Push `typ0` onto the module's typestack seq (pending types whose full
   definitions still need to be emitted). */
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
(*m0).typestack = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&(*m0).typestack->data[(*m0).typestack->Sup.len]), typ0);
++(*m0).typestack->Sup.len;
}
/* Get a type descriptor where only a weak (forward) definition is needed:
   for object/tuple kinds use the forward name of the unique type and defer
   the full definition via pushtype (except imported C++ generics
   instantiated through kind 11, which need the full descriptor); for seq
   kind 24, the forward name with a suffix (T839829468_53, presumably "*").
   Everything else goes through the full gettypedescaux path. */
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* etb0;
result0 = (Ropeobj178006*)0;
etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_533476_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind292244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_533503_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
Ttype292840* x0;
x0 = getuniquetype_528640_2036603609(etb0);
result0 = gettypeforward_534039_839829468(m0, x0);
pushtype_533958_839829468(m0, x0);
}
LA2: ;
}
break;
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* LOC10;
x0 = getuniquetype_528640_2036603609(etb0);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_533503_839829468(m0, t0, check0);
}
break;
}
return result0;
}
/* Length of a node's sons seq: 0 when the seq pointer is nil, otherwise the
 * seq's stored length (the inner ternary re-checks nil defensively, as in the
 * generated original). */
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) {
	if ((*n0).kindU.S6.sons == 0) {
		return ((NI) 0);
	}
	return ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0);
}
/* Format args0 through the compiler's rope formatter and append the result
 * rope onto *c0. */
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006* formatted0 = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
	add_178482_2381377266(c0, formatted0);
}
/* Scan an importcpp pattern slot of the shape '***N at *cursor0: advance past
 * the introducing character, count the '*' (42) run into *outstars0, and if a
 * digit follows store its value into *outidx0 and return true. On a non-digit
 * the cursor is left after the star run and false is returned. */
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
	NI begin0;
	NU8 ch0;
	(*cursor0) += ((NI) 1);
	begin0 = (*cursor0);
	while ((NU8)(pat0->data[(*cursor0)]) == (NU8)(42)) {
		(*cursor0) += ((NI) 1);
	}
	ch0 = (NU8)(pat0->data[(*cursor0)]);
	if (((NU8)(48)) <= ch0 && ch0 <= ((NU8)(57))) {
		(*outidx0) = (NI)(((NI) ch0) - ((NI) 48));
		(*outstars0) = (NI)((*cursor0) - begin0);
		(*cursor0) += ((NI) 1);
		return NIM_TRUE;
	}
	return NIM_FALSE;
}
/* Resolve the idx0-th generic argument of typ0 for an importcpp pattern and
 * strip stars0 levels of indirection: for kind 11 follow sons[1], otherwise
 * take the element type. Raises an internal error when idx0 is out of range. */
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) {
	Ttype292840* result0;
	NI level0;
	if (len_295339_850551059(typ0) <= idx0) {
		internalerror_196113_155036129(((NimStringDesc*) &T839829468_81));
	}
	result0 = (*typ0).sons->data[idx0];
	for (level0 = ((NI) 1); level0 <= stars0; ++level0) {
		/* only descend while the current type is non-nil and has sons;
		 * && preserves the original's short-circuit before the len call. */
		if (!(result0 == NIM_NIL) && (((NI) 0) < len_295339_850551059(result0))) {
			if ((*result0).kind == ((Ttypekind292244) 11)) {
				result0 = (*result0).sons->data[((NI) 1)];
			} else {
				result0 = elemtype_320394_3876443242(result0);
			}
		}
	}
	return result0;
}
/* Mangle a field identifier for C output; when the mangled name collides with
 * a keyword, upper-case its first character to sidestep the collision. */
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) {
	NimStringDesc* result0 = mangle_528847_2036603609((*name0).s);
	if (iskeyword_532960_839829468(name0)) {
		result0->data[((NI) 0)] = nsuToUpperAsciiChar(result0->data[((NI) 0)]);
	}
	return result0;
}
/* Choose the C name for a record field: if the record type has a symbol with
 * any of the flag bits in mask 96 set, reuse the field's precomputed location
 * rope; otherwise mangle the field's identifier. A nil result is an internal
 * error (T839829468_96). */
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) {
	Ropeobj178006* result0;
	NIM_BOOL useloc0 = !((*rectype0).sym == NIM_NIL) && !((96 & (*(*rectype0).sym).flags) == 0);
	if (useloc0) {
		result0 = (*field0).loc.r;
	} else {
		NimStringDesc* mangled0 = manglefield_532973_839829468((*field0).name);
		result0 = rope_178277_2381377266(mangled0);
	}
	if (result0 == NIM_NIL) {
		internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
	}
	return result0;
}
/* Recursively emit the C field declarations for the record AST node n0 of
 * record type rectype0, returning them as a rope. accessexpr0 is the access
 * path prefix used to reach fields nested inside the anonymous unions that
 * case-objects compile to (nil at the top level). Machine-generated Nim
 * compiler code: node-kind ordinals 138/139/3 are presumably
 * nkRecList/nkRecCase/nkSym — confirm against Nim's TNodeKind. */
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) {
Ropeobj178006* result0;
Ropeobj178006* ae0;
Ropeobj178006* uname0;
Ropeobj178006* sname0;
Ropeobj178006* a0;
Tnode292802* k0;
Tsym292834* field0;
{ result0 = (Ropeobj178006*)0;
ae0 = (Ropeobj178006*)0;
uname0 = (Ropeobj178006*)0;
sname0 = (Ropeobj178006*)0;
a0 = (Ropeobj178006*)0;
k0 = (Tnode292802*)0;
field0 = (Tsym292834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
/* record list: recurse into each son and concatenate the results. */
{
NI i_534447_839829468;
NI HEX3Atmp_534620_839829468;
NI LOC3;
NI res_534623_839829468;
i_534447_839829468 = (NI)0;
HEX3Atmp_534620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295351_850551059(n0);
HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1));
res_534623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC6;
if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5;
i_534447_839829468 = res_534623_839829468;
LOC6 = (Ropeobj178006*)0;
LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC6);
res_534623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
/* record case: emit the discriminator field, then compile the branches
 * into an anonymous union whose member name is the discriminator's
 * mangled name with a 'U' (85) suffix. */
Ropeobj178006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj178006* unionbody0;
{
/* the first son must be a symbol node (the discriminator). */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC12);
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_178277_2381377266(LOC13);
{
/* ae0 = accessexpr0 "." uname0 when a prefix exists, else just uname0. */
TY532811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
{
/* iterate the branch nodes (sons 1..n-1); kinds 85/88 are presumably
 * of-branch / else-branch — confirm against Nim's TNodeKind. */
NI i_534491_839829468;
NI HEX3Atmp_534629_839829468;
NI LOC22;
NI res_534632_839829468;
i_534491_839829468 = (NI)0;
HEX3Atmp_534629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_295351_850551059(n0);
HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 1));
res_534632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24;
i_534491_839829468 = res_534632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) {
case ((Tnodekind292020) 85):
case ((Tnodekind292020) 88):
{
k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]);
{
Ropeobj178006* LOC30;
TY532811 LOC31;
Ropeobj178006* LOC32;
/* multi-field branch: wrap its fields in a nested struct named
 * "S<i>" (T839829468_91 + branch index). */
if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468)));
sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY178507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_178482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
{
/* single-symbol branch: emit its field directly into the union. */
Ropeobj178006* LOC39;
LOC39 = (Ropeobj178006*)0;
LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0);
add_178482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_534632_839829468 += ((NI) 1);
} LA24: ;
}
}
{
/* wrap the collected branches into the union declaration (T839829468_95). */
TY532811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
case ((Tnodekind292020) 3):
{
/* a plain field symbol. */
field0 = (*n0).kindU.S4.sym;
{
/* fields of type kind 62 produce no C declaration at all. */
if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_534361_839829468(field0, rectype0);
{
/* build the field's access expression and record it in field->loc. */
TY532811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0));
{
/* for imported C++ record types no member declarations are emitted. */
NIM_BOOL LOC59;
Ttype292840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_533476_839829468(rectype0);
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256));
{
/* kind 16 with flag bit 0 set: declare via the element type and a
 * special format (T839829468_97). */
NIM_BOOL LOC64;
TY532811 LOC68;
Ttype292840* LOC69;
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype292840*)0;
LOC69 = elemtype_320394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_533503_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
{
/* kind 24: a weak (forward) type descriptor suffices for the field. */
TY532811 LOC73;
if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
{
/* nonzero bitsize: declare as a C bit-field (T839829468_98). */
TY535238 LOC77;
NimStringDesc* LOC78;
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_533503_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_178277_2381377266(LOC78);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
{
/* ordinary field: "type name;" (T839829468_54). */
TY532811 LOC80;
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_533503_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
/* Emit all field declarations of typ0 by delegating to the recursive worker
 * with an empty access-path prefix. */
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
	return genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Build the full C struct/union definition for record type typ0 under the C
 * name name0. Handles the packed attribute, the three inheritance cases for
 * object types (no supertype with/without a type field, supertype present),
 * the field list, and an empty-body fallback. Machine-generated Nim compiler
 * code; type-kind ordinal 17 is presumably tyObject — confirm against Nim's
 * TTypeKind. */
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
NIM_BOOL hasfield0;
Ropeobj178006* attribute0;
TY535238 LOC6;
Ropeobj178006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj178006*)0;
hasfield0 = NIM_FALSE;
{
/* flag bit 21 set: append the active C compiler's packed-struct attribute. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3;
attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
/* "struct/union <name> <attribute>" header via the compiler's format string. */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_534001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3);
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9;
{
/* object with no supertype (sons[0] == nil). */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
/* "pure" object (symbol flag bit 9 or type flag bit 2): open the body
 * with no implicit members (T839829468_85). */
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY533289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
/* otherwise open the body with an implicit member (T839829468_86). */
TY532811 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* supertype present, C++-like mode (gcmd == 2 or module flag bit 27):
 * embed the base via format T839829468_87. */
NIM_BOOL LOC27;
TY178507 LOC31;
Ttype292840* LOC32;
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype292840*)0;
LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_533503_839829468(m0, LOC32, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
/* supertype present, plain C mode: embed it as a "Sup" member
 * (format T839829468_88). */
TY178507 LOC34;
Ttype292840* LOC35;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype292840*)0;
LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_533503_839829468(m0, LOC35, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
/* non-object record kinds: plain opening format T839829468_85. */
TY178507 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_534636_839829468(m0, typ0, check0);
{
/* empty body and no implicit field: emit a dummy member (T839829468_100)
 * so the C struct is non-empty. */
NIM_BOOL LOC40;
TY533289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
memset((void*)LOC44, 0, sizeof(LOC44));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_178482_2381377266(&result0, desc0);
}
LA38: ;
/* close the declaration: "};" + platform newline. */
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC46);
return result0;
}
/* Build the C struct definition for tuple type typ0 under the C name name0:
 * one member per tuple element, named by element index via format
 * T839829468_103. Empty tuples get a placeholder body (T839829468_104) so the
 * C struct is non-empty. Machine-generated Nim compiler code. */
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
TY532811 LOC1;
Ropeobj178006* desc0;
NimStringDesc* LOC13;
result0 = (Ropeobj178006*)0;
/* "struct/union <name> {" header (T839829468_102). */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = structorunion_534001_839829468(typ0);
LOC1[1] = name0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2);
desc0 = NIM_NIL;
{
/* one declaration per tuple element: element type descriptor + index. */
NI i_534799_839829468;
NI HEX3Atmp_534820_839829468;
NI LOC3;
NI res_534823_839829468;
i_534799_839829468 = (NI)0;
HEX3Atmp_534820_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
HEX3Atmp_534820_839829468 = (NI)(LOC3 - ((NI) 1));
res_534823_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC6;
if (!(res_534823_839829468 <= HEX3Atmp_534820_839829468)) goto LA5;
i_534799_839829468 = res_534823_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = gettypedescaux_533503_839829468(m0, (*typ0).sons->data[i_534799_839829468], check0);
LOC6[1] = rope_178401_2381377266(((NI64) (i_534799_839829468)));
addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2);
res_534823_839829468 += ((NI) 1);
} LA5: ;
}
}
{
/* zero elements: append the placeholder body so the struct compiles. */
NimStringDesc* LOC11;
if (!(desc0 == NIM_NIL)) goto LA9;
LOC11 = (NimStringDesc*)0;
LOC11 = rawNewString(tnl_176644_4151366050->Sup.len + 11);
appendString(LOC11, ((NimStringDesc*) &T839829468_104));
appendString(LOC11, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC11);
}
goto LA7;
LA9: ;
{
add_178482_2381377266(&result0, desc0);
}
LA7: ;
/* close the declaration: "};" + platform newline. */
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC13, ((NimStringDesc*) &T839829468_101));
appendString(LOC13, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC13);
return result0;
}
N_NIMCALL(Ropeobj178006*, gettypedescaux_533503_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* t_534942_839829468;
{ result0 = (Ropeobj178006*)0;
t_534942_839829468 = getuniquetype_528640_2036603609(typ0);
{
if (!(t_534942_839829468 == NIM_NIL)) goto LA3;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7;
useheader_532369_839829468(m0, (*t_534942_839829468).sym);
}
LA7: ;
result0 = gettypepre_533972_839829468(m0, t_534942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_533476_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_533476_839829468(t_534942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_196113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_534942_839829468).kind) {
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
{
NimStringDesc* star0;
Ttype292840* et0;
Ttype292840* LOC38;
Ttype292840* etb0;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
et0 = lastson_295377_850551059(LOC38);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
{
if (!((*etb0).kind == ((Ttypekind292244) 4) || (*etb0).kind == ((Ttypekind292244) 16) || (*etb0).kind == ((Ttypekind292244) 27) || (*etb0).kind == ((Ttypekind292244) 48))) goto LA41;
et0 = elemtype_320394_3876443242(etb0);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj178006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_533476_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind292244) 11));
LA47: ;
if (!LOC46) goto LA48;
LOC50 = (Ropeobj178006*)0;
LOC50 = gettypedescaux_533503_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
Ttype292840* x0;
Ropeobj178006* name0;
Tidobj199004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(name0, star0);
LOC52 = (Tidobj199004*)0;
LOC52 = &t_534942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_533958_839829468(m0, x0);
}
LA44: ;
}
break;
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* name0;
Ropeobj178006* LOC55;
Tidobj199004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
LOC55 = (Ropeobj178006*)0;
LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_178447_2381377266(LOC55, star0);
LOC56 = (Tidobj199004*)0;
LOC56 = &t_534942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
Ropeobj178006* LOC59;
Tidobj199004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj178006*)0;
LOC59 = gettypedescaux_533503_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC59, star0);
LOC60 = (Tidobj199004*)0;
LOC60 = &t_534942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Ropeobj178006* LOC63;
Tidobj199004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj178006*)0;
LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj199004*)0;
LOC64 = &t_534942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 14):
{
Ttype292840* t0;
{
if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69;
t0 = lastson_295377_850551059(t_534942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_534942_839829468;
}
LA67: ;
result0 = cachegettype_533591_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_533313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj199004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_533476_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj199004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
NI64 LOC88;
TY178507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_320001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_320135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY178507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY178507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY178507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY178507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
owner0 = hashowner_532977_839829468((*t0).sym);
{
NIM_BOOL LOC105;
TY203017* vals0;
Enumdesc203007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_203230_1926258066(gdebuginfo_203470_1926258066, (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY203017*) newSeq((&NTI203017), 0);
{
NI i_535144_839829468;
NI HEX3Atmp_535648_839829468;
NI LOC109;
NI res_535651_839829468;
i_535144_839829468 = (NI)0;
HEX3Atmp_535648_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_293081_850551059((*t0).n);
HEX3Atmp_535648_839829468 = (NI)(LOC109 - ((NI) 1));
res_535651_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
TY203018 LOC112;
NimStringDesc* LOC113;
if (!(res_535651_839829468 <= HEX3Atmp_535648_839829468)) goto LA111;
i_535144_839829468 = res_535651_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_535651_839829468 += ((NI) 1);
} LA111: ;
}
}
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI203017));
registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
case ((Ttypekind292244) 25):
{
Tidobj199004* LOC116;
TNimObject* LOC117;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC116 = (Tidobj199004*)0;
LOC116 = &t_534942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC120)) goto LA121;
{
TY535235 LOC127;
if (!!(((*t_534942_839829468).callconv == ((Tcallingconvention292002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*t_534942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY535238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
case ((Ttypekind292244) 24):
{
Tidobj199004* LOC144;
Ropeobj178006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY532811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_534001_839829468(t_534942_839829468);
LOC141[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj199004*)0;
LOC142 = &t_534942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj199004*)0;
LOC144 = &t_534942_839829468->Sup;
LOC145 = (Ropeobj178006*)0;
LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC149)) goto LA150;
{
Ttype292840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY532811 LOC166;
LOC154 = (Ttype292840*)0;
LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_533503_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 n0;
Tidobj199004* LOC173;
TNimObject* LOC174;
n0 = lengthord_320007_3876443242(t_534942_839829468);
{
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC173 = (Tidobj199004*)0;
LOC173 = &t_534942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj178006* foo0;
TY535238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_533503_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_178401_2381377266(n0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC184;
Ropeobj178006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj178006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_533476_839829468(t_534942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind292244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_533313_839829468(t_534942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype292840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_178487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY533289 LOC206;
Ropeobj178006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj178006*)0;
LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_178482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj178006* LOC209;
LOC209 = (Ropeobj178006*)0;
LOC209 = gettypedescaux_533503_839829468(m0, typeinslot0, check0);
add_178482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_178487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_535516_839829468;
NI HEX3Atmp_535664_839829468;
NI LOC218;
NI res_535667_839829468;
i_535516_839829468 = (NI)0;
HEX3Atmp_535664_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_295339_850551059(typ0);
HEX3Atmp_535664_839829468 = (NI)(LOC218 - ((NI) 2));
res_535667_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC225;
if (!(res_535667_839829468 <= HEX3Atmp_535664_839829468)) goto LA220;
i_535516_839829468 = res_535667_839829468;
{
if (!(((NI) 1) < i_535516_839829468)) goto LA223;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj178006*)0;
LOC225 = gettypedescaux_533503_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0);
add_178482_2381377266(&result0, LOC225);
res_535667_839829468 += ((NI) 1);
} LA220: ;
}
}
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
LOC226 = (Ropeobj178006*)0;
LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
Tidobj199004* LOC241;
TNimObject* LOC242;
Ropeobj178006* recdesc0;
result0 = cachegettype_533591_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY532811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_534001_839829468(t_534942_839829468);
LOC238[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj199004*)0;
LOC239 = &t_534942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj199004*)0;
LOC241 = &t_534942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245;
recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC250)) goto LA251;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
case ((Ttypekind292244) 19):
{
Ttype292840* LOC254;
Ropeobj178006* LOC255;
Tidobj199004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype292840*)0;
LOC254 = lastson_295377_850551059(t_534942_839829468);
LOC255 = (Ropeobj178006*)0;
LOC255 = gettypename_533313_839829468(LOC254);
result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj199004*)0;
LOC256 = &t_534942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_533449_839829468(t_534942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_320135_3876443242(t_534942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY532811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY532811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_320135_3876443242(t_534942_839829468);
LOC267[1] = rope_178401_2381377266(LOC268);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC270;
LOC270 = (Ttype292840*)0;
LOC270 = lastson_295377_850551059(t_534942_839829468);
result0 = gettypedescaux_533503_839829468(m0, LOC270, check0);
}
break;
default:
{
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244)));
appendChar(LOC272, 41);
internalerror_196113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* Returns true when the type's kind is one of the two numeric tags 8 or 59.
 * Generated by the Nim compiler's C backend; presumably these tags are the
 * compile-time-only type kinds (e.g. typedesc/static) -- TODO confirm against
 * the Nim compiler's TTypeKind enum. */
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) {
	switch ((*t0).kind) {
	case ((Ttypekind292244) 8):
	case ((Ttypekind292244) 59):
		return NIM_TRUE;
	default:
		return NIM_FALSE;
	}
}
/* Chooses the storage location tag for a parameter: after skipping wrapper
 * types (mask 8388864), kinds 16/27/48/4 get storage tag 0, anything else
 * gets tag 2. The numeric kind/storage tags come from the Nim compiler's
 * enums; their names are not visible in this generated file. */
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) {
	Ttype292840* skipped;
	skipped = skiptypes_296099_850551059((*param0).typ, 8388864);
	if ((*skipped).kind == ((Ttypekind292244) 16) ||
	    (*skipped).kind == ((Ttypekind292244) 27) ||
	    (*skipped).kind == ((Ttypekind292244) 48) ||
	    (*skipped).kind == ((Ttypekind292244) 4)) {
		return ((Tstorageloc292812) 0);
	}
	return ((Tstorageloc292812) 2);
}
/* Decides whether the C code generator passes this parameter symbol by
 * hidden pointer. Machine-generated by the Nim compiler's C backend
 * (presumably from compiler/ccgtypes.nim -- TODO confirm). Logic visible
 * here: two type flags (bits 13 and 12) force TRUE / FALSE respectively;
 * otherwise the decision depends on the skipped type's kind -- kind 17
 * (object-like) and kind 18 (tuple-like) compare the type's size against
 * 2 * floatsize, everything else returns FALSE. Left byte-identical:
 * the goto-based short-circuit evaluation is order-sensitive. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533609_839829468)(Tsym292834* s0) {
NIM_BOOL result0;
Ttype292840* pt0;
{ result0 = (NIM_BOOL)0;
/* Skip wrapper/alias type kinds selected by the 64-bit mask. */
pt0 = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256));
{
/* Type flag bit 13 set -> always introduce a pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 13))&31U)))!=0)) goto LA3;
result0 = NIM_TRUE;
goto BeforeRet;
}
goto LA1;
LA3: ;
{
/* Type flag bit 12 set -> never introduce a pointer. */
if (!(((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 12))&31U)))!=0)) goto LA6;
result0 = NIM_FALSE;
goto BeforeRet;
}
goto LA1;
LA6: ;
LA1: ;
switch ((*pt0).kind) {
case ((Ttypekind292244) 17):
{
{
NIM_BOOL LOC11;
NI64 LOC13;
LOC11 = (NIM_BOOL)0;
/* Symbol option bit 18 set, OR size > 2*floatsize -> pass by pointer. */
LOC11 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
if (LOC11) goto LA12;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242(pt0);
LOC11 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC13);
LA12: ;
if (!LOC11) goto LA14;
result0 = NIM_TRUE;
}
goto LA9;
LA14: ;
{
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
/* Type flag bit 2 set AND first son nil -> by value. */
LOC17 = (((*pt0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (!(LOC17)) goto LA18;
LOC17 = ((*pt0).sons->data[((NI) 0)] == NIM_NIL);
LA18: ;
if (!LOC17) goto LA19;
result0 = NIM_FALSE;
}
goto LA9;
LA19: ;
{
result0 = NIM_TRUE;
}
LA9: ;
}
break;
case ((Ttypekind292244) 18):
{
NIM_BOOL LOC23;
NI64 LOC24;
LOC23 = (NIM_BOOL)0;
LOC24 = (NI64)0;
LOC24 = getsize_320135_3876443242(pt0);
/* Pointer iff size > 2*floatsize OR option bit 18 set. */
LOC23 = (((NI64) ((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < LOC24);
if (LOC23) goto LA25;
LOC23 = (((*s0).options &(1U<<((NU)(((Toption169009) 18))&31U)))!=0);
LA25: ;
result0 = LOC23;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}BeforeRet: ;
return result0;
}
/* Maps a Nim type to the C-type-kind tag used for return values.
 * Currently a plain delegation to maptype. */
N_NIMCALL(Tctypekind529007, mapreturntype_533445_839829468)(Ttype292840* typ0) {
	return maptype_533393_839829468(typ0);
}
/* Builds the C return-type rope (*rettype0) and the C parameter list rope
 * (*params0) for a proc type t0. Machine-generated by the Nim compiler's
 * C backend (presumably compiler/ccgtypes.nim genProcParams -- TODO confirm).
 * Visible structure: (1) choose return type -- "void"-like format when the
 * return slot is nil or an invalid return type, else the full type
 * descriptor; (2) loop over the formal parameters in (*t0).n, emitting each
 * one's type and mangled name, with an extra level of pointer for params
 * where ccgintroducedptr says so, and extra length parameters for
 * open-array-like kinds (27/48); (3) append a hidden result pointer when the
 * return type is invalid as a C return value; (4) append a closure
 * environment parameter for calling convention 8; (5) append varargs ("...")
 * when type flag bit 0 is set; (6) wrap the list in parentheses.
 * Left byte-identical: the goto flow and rope-append ordering are exact. */
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
{
NIM_BOOL LOC3;
TY533289 LOC7;
LOC3 = (NIM_BOOL)0;
/* Return slot nil OR invalid-as-C-return -> emit the void-style format. */
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_533548_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533503_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
{
/* Formal parameter loop: indices 1 .. sonslen((*t0).n)-1. */
NI i_534152_839829468;
NI HEX3Atmp_534353_839829468;
NI LOC10;
NI res_534356_839829468;
i_534152_839829468 = (NI)0;
HEX3Atmp_534353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_295351_850551059((*t0).n);
HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1));
res_534356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12;
i_534152_839829468 = res_534356_839829468;
{
Tsym292834* param0;
Ropeobj178006* LOC29;
Tstorageloc292812 LOC30;
TY533289 LOC45;
Ropeobj178006* LOC46;
Ttype292840* arr0;
NI j0;
{
/* Internal error if the param node is not a symbol node (kind 3). */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16;
internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
/* Compile-time-only parameters are skipped entirely. */
LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
TY533289 LOC27;
Ropeobj178006* LOC28;
/* Comma separator before every parameter after the first. */
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_178482_2381377266(params0, LOC28);
}
LA25: ;
LOC29 = (Ropeobj178006*)0;
LOC29 = manglename_533205_839829468(param0);
LOC30 = (Tstorageloc292812)0;
LOC30 = paramstorageloc_534098_839829468(param0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30);
{
NIM_BOOL LOC33;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC33 = (NIM_BOOL)0;
/* Pass-by-hidden-pointer: emit "type" then "*" and mark loc flag bit 0. */
LOC33 = ccgintroducedptr_533609_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj178006*)0;
LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_178482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
goto LA31;
LA34: ;
{
Ropeobj178006* LOC42;
/* Weak dependency: only a forward-declarable type desc is needed. */
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
Ropeobj178006* LOC44;
LOC44 = (Ropeobj178006*)0;
LOC44 = gettypedescaux_533503_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC44);
}
LA31: ;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_178482_2381377266(params0, LOC46);
add_178482_2381377266(params0, (*param0).loc.r);
arr0 = (*param0).typ;
{
/* Kind 23 wraps its element type in son 0; unwrap it first. */
if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
/* For nested open-array-like kinds (27/48) emit one extra length
 * parameter per nesting level, named via format T..._112 with index j0. */
while (1) {
TY532811 LOC57;
if (!((*arr0).kind == ((Ttypekind292244) 27) || (*arr0).kind == ((Ttypekind292244) 48))) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_178401_2381377266(((NI64) (j0)));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_534356_839829468 += ((NI) 1);
} LA12: ;
}
}
{
NIM_BOOL LOC60;
Ttype292840* arr0;
TY533289 LOC76;
LOC60 = (NIM_BOOL)0;
/* Non-nil return type that is invalid as a C return value: append a
 * hidden result parameter instead. */
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_533548_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
Tctypekind529007 LOC70;
Ropeobj178006* LOC73;
LOC70 = (Tctypekind529007)0;
/* C-type-kind 17 (array-like?) already decays; others get a "*". */
LOC70 = mapreturntype_533445_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71;
LOC73 = (Ropeobj178006*)0;
LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC73);
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj178006* LOC75;
LOC75 = (Ropeobj178006*)0;
LOC75 = gettypedescaux_533503_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
{
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
/* Calling convention 8 (closure?) with declareenvironment0: append the
 * hidden environment parameter. */
LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
{
/* Type flag bit 0: append the varargs marker. */
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
{
/* Empty parameter list gets its own terminator; otherwise close paren. */
if (!((*params0) == NIM_NIL)) goto LA97;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Generates the C prototype/header rope for proc symbol prc0: optional
 * line directive, linkage/export decoration (loc flag bit 5 or calling
 * convention 5), then delegates the parameter list to genprocparams and
 * formats the result -- either with the standard header format (T..._119)
 * or, when the symbol carries a codegen constraint string, with that
 * user-supplied format. Machine-generated Nim C backend code.
 *
 * FIX: the call to genprocparams contained the mojibake sequence
 * `¶ms0` -- an HTML-entity-corrupted `&params0` (`&para;` + `ms0`) --
 * which is not valid C and cannot compile. Restored to `&params0`,
 * matching the `&rettype0` argument next to it and the
 * `Ropeobj178006** params0` out-parameter of genprocparams. */
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Ropeobj178006* result0;
Ropeobj178006* rettype0;
Ropeobj178006* params0;
Intset268030 check0;
Ropeobj178006* LOC13;
result0 = (Ropeobj178006*)0;
rettype0 = (Ropeobj178006*)0;
params0 = (Ropeobj178006*)0;
genclinedir_532813_839829468(&result0, (*prc0).info);
{
/* loc flag bit 5: exported symbol -- pick decoration by codegen flag 3. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* Calling convention 5 gets its own decoration (T..._24). */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* Generated init pattern: zero, nil-check, zero again, then init the set. */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
LOC13 = (Ropeobj178006*)0;
LOC13 = manglename_533205_839829468(prc0);
fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0));
genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
TY535235 LOC18;
/* No constraint string: standard "callconv rettype name params" format. */
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
TY535238 LOC20;
/* Constraint present: the user-supplied format string wins. */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
/* `[]` operator (name-mangled as HEX5BHEX5D): returns the i0-th son of
 * AST node n0. No bounds checking is visible here. */
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) {
	return (*n0).kindU.S6.sons->data[i0];
}
/* Recursively searches an AST node for a simple trailing assignment to the
 * `result` variable (symbol kind 11) and returns its right-hand side, or nil.
 * Node kinds 115/126 (statement-list-like): skip leading "trivial" nodes
 * (kinds 1, 79..81, 84, 98, 101, 125) and recurse into the first non-trivial
 * one. Kinds 73/74 (assignment-like): if the LHS is a symbol of kind 11,
 * mark the node (flag bit 14) and return son 1. Kind 109: recurse into son 0
 * and mark on success. Machine-generated Nim C backend code; left
 * byte-identical because the short-circuit goto chains are order-sensitive. */
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) {
Tnode292802* result0;
{ result0 = (Tnode292802*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 115):
case ((Tnodekind292020) 126):
{
NI i0;
i0 = ((NI) 0);
{
/* Advance i0 past sons whose kind is in the "ignorable" set. */
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode292802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(n0, i0);
LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
/* Recurse into the first remaining son, if any. */
NI LOC10;
Tnode292802* LOC13;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(n0, i0);
result0 = easyresultasgn_560191_839829468(LOC13);
}
LA11: ;
}
break;
case ((Tnodekind292020) 73):
case ((Tnodekind292020) 74):
{
{
NIM_BOOL LOC17;
Tnode292802* LOC18;
Tnode292802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode292802*)0;
/* LHS must be a symbol node (kind 3) whose symbol kind is 11. */
LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
/* Mark the assignment node (flag bit 14) and return its RHS. */
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
case ((Tnodekind292020) 109):
{
{
NI LOC26;
Tnode292802* LOC29;
LOC26 = (NI)0;
LOC26 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode292802*)0;
LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_560191_839829468(LOC29);
{
/* Only mark this wrapper node when the recursion found something. */
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
return result0;
}
/* Public entry point for type-descriptor generation: sets up a fresh
 * int-set of already-emitted type ids and delegates to gettypedescaux.
 * The double memset around chckNil is the generated initialization
 * pattern used throughout this file; kept verbatim. */
N_NIMCALL(Ropeobj178006*, gettypedesc_535671_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
	Intset268030 seen;
	memset((void*)(&seen), 0, sizeof(seen));
	chckNil((void*)(&seen));
	memset((void*)(&seen), 0, sizeof(seen));
	initintset_268885_2627731572((&seen));
	return gettypedescaux_533503_839829468(m0, typ0, (&seen));
}
/* Produces the C declaration rope for a local variable symbol s0.
 * If the symbol's loc is still unset (kind 0) it is filled in first with a
 * mangled name (symbol kind 9 additionally gets loc flag bit 2). The
 * declaration is the type descriptor plus optional qualifiers driven by
 * symbol flag bits 8 and 7 (presumably register/volatile-style hints --
 * TODO confirm), unless the symbol has a constraint string, in which case
 * that format string builds the whole declaration. Machine-generated Nim C
 * backend code; left byte-identical. */
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
Ropeobj178006* LOC5;
/* Only fill the loc if nobody set it yet (loc kind 0 = unset). */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2));
{
if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_535671_839829468((*p0).module, (*s0).loc.t);
{
/* No constraint: "type [qual...] name" assembled from format pieces. */
if (!(*s0).constraint == 0) goto LA12;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_178482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
TY532811 LOC23;
/* Constraint present: user format receives {type, name}. */
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
/* Initializes a Tloc record in place: kind, storage, type, nil rope,
 * cleared flags. unsureAsgnRef is the GC write barrier used for the
 * ref-typed fields. Field assignments are independent of one another. */
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) {
	(*result0).flags = 0;
	(*result0).k = k0;
	(*result0).s = s0;
	unsureAsgnRef((void**) (&(*result0).t), typ0);
	unsureAsgnRef((void**) (&(*result0).r), NIM_NIL);
}
/* Initializes result0 as a fresh loc for expression e0, marks it as
 * single-use (loc flag bit 8), then generates the expression into it.
 * The three calls are order-dependent: expr reads the flags set here. */
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
(*result0).flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8));
expr_539248_839829468(p0, e0, result0);
}
/* Returns a pointer to section s0 of the innermost (last) block of proc
 * context p0. The `blocks ? len : 0` guard matches the generated nil-seq
 * convention; with an empty/nil blocks seq the index would be -1, so a
 * non-empty blocks stack is presumably an invariant here -- TODO confirm. */
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
	NI top;
	top = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
	return &(*p0).blocks->data[top].sections[(s0)- 0];
}
/* Prepends one indent rope per open block in p0 to rope r0 and returns
 * the result. Indentation depth equals the length of p0->blocks. */
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) {
	Ropeobj178006* out;
	NI depth;
	NI k;
	out = r0;
	depth = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
	for (k = ((NI) 0); k < depth; k += ((NI) 1)) {
		prepend_178893_2381377266(&out, indent_532655_839829468);
	}
	return out;
}
/* Formats frmt0 with args0 via ropecg, indents the resulting line to the
 * current block depth, and appends it to section s0 of the innermost block.
 * Call order (section lookup -> format -> indent -> append) is preserved. */
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006** dest;
	Ropeobj178006* formatted;
	Ropeobj178006* indented;
	dest = s_529179_3723162438(p0, s0);
	formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
	indented = indentline_532656_839829468(p0, formatted);
	add_178482_2381377266(dest, indented);
}
/* Rope for reading a loc's value: the loc's rope as-is, or wrapped in the
 * dereference format (T..._124) when loc flag bit 0 (hidden-pointer
 * parameter, set by genprocparams) is present. */
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816 a0) {
	Ropeobj178006* r;
	r = a0.r;
	if (((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0)) {
		TY178507 fmtargs;
		memset((void*)fmtargs, 0, sizeof(fmtargs));
		fmtargs[0] = r;
		r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), fmtargs, 1);
	}
	return r;
}
/* Appends rope r0, indented to the current block depth, to section s0 of
 * the innermost block of proc context p0. */
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) {
	Ropeobj178006** dest;
	Ropeobj178006* indented;
	dest = s_529179_3723162438(p0, s0);
	indented = indentline_532656_839829468(p0, r0);
	add_178482_2381377266(dest, indented);
}
/* Like linefmt, but formats with the plain `%` formatter (HEX25) instead
 * of ropecg, then indents and appends to section s0 of the current block. */
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006** dest;
	Ropeobj178006* formatted;
	Ropeobj178006* indented;
	dest = s_529179_3723162438(p0, s0);
	formatted = HEX25_178905_2381377266(frmt0, args0, args0Len0);
	indented = indentline_532656_839829468(p0, formatted);
	add_178482_2381377266(dest, indented);
}
/* Emits the base runtime-type-information (RTTI) initialization for typ0
 * into module m0's output sections: the main TNimType init line
 * (name, size expression, numeric type kind, base pointer; format T..._134),
 * an optional flags line (bit 0: contains no GC'd refs per
 * containsgarbagecollectedref; bit 1: cannot form a cycle per
 * canformacycle; format T..._135), and a name-registration line
 * (format T..._136) in a different section. Machine-generated Nim C
 * backend code; left byte-identical because emission order determines
 * the generated C output. */
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) {
NI nimtypekind0;
Ropeobj178006* size0;
TY535235 LOC17;
NI flags0;
Ropeobj178006* LOC33;
TY532811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* Objects lacking a type field are reported as kind 18 instead of their
 * real kind. */
LOC3 = isobjlackingtypefield_533513_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj178006*)0;
{
/* Type flag bit 0 set: use the fixed size expression T..._133. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9;
size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
/* Command mode 2, or module symbol flag bit 27: size from origtype0. */
LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_535671_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_535671_839829468(m0, typ0);
}
LA7: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = containsgarbagecollectedref_320117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = canformacycle_320123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
TY532811 LOC32;
/* Only emit the flags assignment when some flag bit is set. */
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_178401_2381377266(((NI64) (flags0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* Ensure the runtime symbol T..._129 is emitted, then register the
 * human-readable type name (typetostring) in section 9. */
LOC33 = (Ropeobj178006*)0;
LOC33 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC34[1] = rope_178277_2381377266(LOC35);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
/* Returns a rope referencing the next free slot in module m0's TNimNode
 * array (format T..._138 applied to the array name and current index),
 * then bumps the per-module node counter. */
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) {
	TY532811 fmtargs;
	Ropeobj178006* node;
	memset((void*)fmtargs, 0, sizeof(fmtargs));
	fmtargs[0] = (*m0).typenodesname;
	fmtargs[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
	node = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), fmtargs, 2);
	(*m0).typenodes += ((NI) 1);
	return node;
}
/* Emits RTTI for a tuple type typ0: base TNimType init with a null-like
 * base (rope T..._18), then a TNimNode tree describing the fields. For a
 * non-empty tuple it declares a node array (format T..._139) and fills one
 * node per field with its offset/type info (formats T..._140 / T..._141),
 * finally linking the array into the root node (T..._142); an empty tuple
 * gets the short form T..._143. The root node is attached to the type info
 * with T..._144. Machine-generated Nim C backend code; emission order is
 * significant, left byte-identical. */
N_NIMCALL(void, gentupleinfo_536549_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
Ropeobj178006* expr0;
NI length0;
TY532811 LOC15;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1);
expr0 = getnimnode_535945_839829468(m0);
length0 = sonslen_295327_850551059(typ0);
{
Ropeobj178006* tmp0;
TY532811 LOC6;
TY535238 LOC12;
/* Non-empty tuple: emit a node array of `length0` entries. */
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_533596_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
/* One TNimNode per tuple field, index 0 .. length0-1. */
NI i_536571_839829468;
NI HEX3Atmp_536590_839829468;
NI res_536593_839829468;
i_536571_839829468 = (NI)0;
HEX3Atmp_536590_839829468 = (NI)0;
HEX3Atmp_536590_839829468 = (NI)(length0 - ((NI) 1));
res_536593_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* a0;
Ropeobj178006* tmp20;
TY535238 LOC10;
TY535235 LOC11;
if (!(res_536593_839829468 <= HEX3Atmp_536590_839829468)) goto LA9;
i_536571_839829468 = res_536593_839829468;
a0 = (*typ0).sons->data[i_536571_839829468];
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_178401_2381377266(((NI64) (i_536571_839829468)));
LOC10[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_535671_839829468(m0, typ0);
LOC11[2] = rope_178401_2381377266(((NI64) (i_536571_839829468)));
LOC11[3] = gentypeinfo_535941_839829468(m0, a0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_536593_839829468 += ((NI) 1);
} LA9: ;
}
}
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_178401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
TY532811 LOC14;
/* Empty tuple: no node array, short root-node init. */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
/* Constructs a synthetic closure type owned by owner0: a tuple (kind 18)
 * whose first son is a proc-like type (kind 26) and whose second son is a
 * ref-like type (kind 22) wrapping an empty tuple (the environment).
 * The numeric kind tags are Nim compiler TTypeKind values; names are not
 * visible in this generated file. newtype/rawaddson call order preserved. */
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) {
	Ttype292840* closureTuple;
	Ttype292840* procPart;
	Ttype292840* envRef;
	Ttype292840* envTuple;
	closureTuple = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
	procPart = newtype_295107_850551059(((Ttypekind292244) 26), owner0);
	rawaddson_296394_850551059(closureTuple, procPart);
	envRef = newtype_295107_850551059(((Ttypekind292244) 22), owner0);
	envTuple = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
	rawaddson_296394_850551059(envRef, envTuple);
	rawaddson_296394_850551059(closureTuple, envRef);
	return closureTuple;
}
/* Emits RTTI for typ0 with an inherited base: when the type has a non-nil
 * first son, that son (skipping wrappers for object kind 17) supplies the
 * base type-info pointer via recursive gentypeinfo; otherwise the null-like
 * rope T..._18 is used. Delegates the actual emission to
 * gentypeinfoauxbase. Machine-generated Nim C backend code. */
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* base0;
base0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype292840* x0;
LOC3 = (NIM_BOOL)0;
/* Short-circuit: only read son 0 when the type has at least one son. */
LOC4 = (NI)0;
LOC4 = sonslen_295327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* For object types (kind 17) skip wrapper kinds before recursing. */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_535941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0);
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Predicate: true when `t0` is one of five "complex value" kinds
 * (16, 4, 19, 18, 17), or is kind 25 with calling convention 8 --
 * presumably a closure proc. Kind numbers are opaque; verify against
 * Nim's TTypeKind/TCallingConvention enums if this matters. */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC3;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((*t0).kind == ((Ttypekind292244) 16) || (*t0).kind == ((Ttypekind292244) 4) || (*t0).kind == ((Ttypekind292244) 19) || (*t0).kind == ((Ttypekind292244) 18) || (*t0).kind == ((Ttypekind292244) 17));
if (LOC1) goto LA2;
/* short-circuit: kind == 25 && callconv == 8 */
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA4: ;
LOC1 = LOC3;
LA2: ;
result0 = LOC1;
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Idempotently registers <string.h> (rope T839829468_151) in the module's
 * header-file set: checks flag bit 4, sets it, then calls includestr.
 * The boolean result of includestr is intentionally discarded (LOC5). */
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC5;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 4))&7U)))!=0))) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8));
LOC5 = (NIM_BOOL)0;
LOC5 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_151));
}
LA3: ;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Produces the C expression rope for the address of location `a0`.
 * Starts from a0.r; when the loc does NOT have flag bit 0 set and its mapped
 * C type is not kind 17, wraps the rope as T839829468_128 ++ r ++ T839829468_117
 * (presumably "(&" ... ")"). Otherwise a0.r is already address-like and is
 * returned as-is. */
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816 a0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = a0.r;
{
/* short-circuit: !(flag bit 0) && maptype(a0.t) != 17 */
NIM_BOOL LOC3;
Tctypekind529007 LOC5;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
if (!(LOC3)) goto LA4;
LOC5 = (Tctypekind529007)0;
LOC5 = maptype_533393_839829468(a0.t);
LOC3 = !((LOC5 == ((Tctypekind529007) 17)));
LA4: ;
if (!LOC3) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), result0);
result0 = HEX26_178447_2381377266(LOC8, ((NimStringDesc*) &T839829468_117));
}
LA6: ;
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits code that initializes the hidden type field(s) of an object located
 * at `a0`. analyseobjectwithtypefield decides between three cases:
 *   0 -- no type field: emit nothing;
 *   1 -- a single type field: emit one "set m_type" line (T839829468_154),
 *        possibly walking up the inheritance chain (appending T839829468_153
 *        per level) unless compiling in a mode/flag combination that skips it;
 *   2 -- embedded objects with type fields: emit an objectInit-style call
 *        (T839829468_155) on the address (or value) of a0.
 * `takeaddr0` selects whether r0 is the address or the dereferenced value. */
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816 a0, NIM_BOOL takeaddr0) {
Ttypefieldresult320145 LOC1;
LOC1 = (Ttypefieldresult320145)0;
LOC1 = analyseobjectwithtypefield_320149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult320145) 0):
{
/* no type field -- nothing to initialize */
}
break;
case ((Ttypefieldresult320145) 1):
{
Ropeobj178006* r0;
Ttype292840* s0;
TY532811 LOC19;
r0 = rdloc_538188_839829468(a0);
{
TY178507 LOC8;
if (!!(takeaddr0)) goto LA6;
/* not taking the address: dereference via format T839829468_124 */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
{
/* short-circuit: gcmd == 2 || module has flag bit 27 -- if NOT, walk base chain */
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
/* while s0 is kind 17 (object) and has a base type: append field accessor */
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind292244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult320145) 2):
{
Ropeobj178006* r0;
TY532811 LOC26;
{
if (!takeaddr0) goto LA23;
r0 = addrloc_538204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_538188_839829468(a0);
}
LA21: ;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits default-construction code for `loc0`:
 *  - simple value types: a single assignment line (format T839829468_150);
 *  - complex value types: optionally a memset-style zeroing (T839829468_152,
 *    needs <string.h>, skipped for temps without GC'd refs and for imported
 *    C++ types), then genobjectinit to set up type fields.
 * `istemp0` relaxes the zeroing requirement for fresh temporaries. */
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816 loc0, NIM_BOOL istemp0) {
Ttype292840* typ0;
typ0 = skiptypes_296099_850551059(loc0.t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC3)) goto LA4;
/* simple scalar-like type: plain zero/default assignment */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(loc0);
LOC6[1] = gettypedesc_535671_839829468((*p0).module, typ0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
/* short-circuit: !istemp0 || contains GC'd refs -> needs explicit zeroing */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_320117_3876443242(loc0.t);
LA11: ;
if (!LOC10) goto LA12;
{
NIM_BOOL LOC16;
TY532811 LOC19;
LOC16 = (NIM_BOOL)0;
LOC16 = isimportedcpptype_533476_839829468(typ0);
if (!!(LOC16)) goto LA17;
/* imported C++ types construct themselves; everything else gets zeroed */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(loc0);
LOC19[1] = rdloc_538188_839829468(loc0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), loc0.t, loc0, NIM_TRUE);
}
LA1: ;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Allocates a fresh temporary of type `t0` in proc `p0`: bumps the label
 * counter to make a unique name (prefix T839829468_149), emits its
 * declaration (format T839829468_54) into the proc's declaration section,
 * fills in the Tloc fields of *result0, and construct-initializes it.
 * NOTE(review): constructloc's istemp argument is !needsinit0 -- i.e. a temp
 * that "needs init" is treated as a non-temp so it is always zeroed. */
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) {
Ropeobj178006* LOC1;
TY532811 LOC2;
(*p0).labels += ((NI) 1);
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels)));
unsureAsgnRef((void**) (&(*result0).r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), LOC1));
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC2[1] = (*result0).r;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC2, 2);
(*result0).k = ((Tlockind292808) 1);
unsureAsgnRef((void**) (&(*result0).t), t0);
(*result0).s = ((Tstorageloc292812) 2);
(*result0).flags = 0;
constructloc_538388_839829468(p0, (*result0), !(needsinit0));
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Returns the accessor expression for an object's inherited/base part:
 * unless compiling with gcmd == 2 or module flag bit 27 (presumably
 * --objChecks off / unchecked mode), wraps `accessor0` in format
 * T839829468_161 (a ".Sup"-style field access); otherwise the accessor
 * is returned unchanged. */
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
TY178507 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = accessor0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), LOC7, 1);
}
goto LA1;
LA5: ;
{
result0 = accessor0;
}
LA1: ;
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Renders a signed integer as a C literal rope, with four cases:
 *  - (INT32_MIN, INT32_MAX]: plain decimal;
 *  - exactly -2147483648: canned constant T839829468_166 (avoids the C
 *    pitfall that "-2147483648" parses as unary minus on a too-large literal);
 *  - > INT64_MIN: wrapped via format T839829468_167 (presumably IL64(...));
 *  - INT64_MIN itself: canned constant T839829468_168, for the same
 *    literal-parsing reason at 64 bits.
 * NOTE(review): the first range test is strict ">" on -2147483648, so that
 * value deliberately falls through to its dedicated branch. */
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (IL64(-2147483648) < i0);
if (!(LOC3)) goto LA4;
LOC3 = (i0 <= IL64(2147483647));
LA4: ;
if (!LOC3) goto LA5;
result0 = rope_178401_2381377266(i0);
}
goto LA1;
LA5: ;
{
TY533289 LOC10;
if (!(i0 == IL64(-2147483648))) goto LA8;
memset((void*)LOC10, 0, sizeof(LOC10));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), LOC10, 0);
}
goto LA1;
LA8: ;
{
TY178507 LOC14;
if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC14, 1);
}
goto LA1;
LA12: ;
{
TY533289 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC16, 0);
}
LA1: ;
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Renders a 64-bit signed literal: any value greater than INT64_MIN goes
 * through format T839829468_167 (presumably an IL64(...) wrapper); INT64_MIN
 * itself uses the canned constant T839829468_168, since -9223372036854775808
 * cannot be written directly as a C literal. */
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!((IL64(-9223372036854775807) - IL64(1)) < i0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(i0);
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), LOC7, 0);
}
LA1: ;
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Renders an unsigned 64-bit literal: stringifies `i0` (HEX24_* is the
 * mangled `$` operator) and appends the suffix string T839829468_171
 * (presumably "ULL"); rawNewString reserves len+3 bytes for that suffix. */
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) {
Ropeobj178006* result0;
NimStringDesc* LOC1;
NimStringDesc* LOC2;
result0 = (Ropeobj178006*)0;
LOC1 = (NimStringDesc*)0;
LOC2 = (NimStringDesc*)0;
LOC2 = HEX24_8401_1689653243(i0);
LOC1 = rawNewString(LOC2->Sup.len + 3);
appendString(LOC1, LOC2);
appendString(LOC1, ((NimStringDesc*) &T839829468_171));
result0 = rope_178277_2381377266(LOC1);
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits a Nim string literal constant into module section 8 and returns its
 * temp name. Registers the runtime symbol T839829468_79 first (cgsym result
 * LOC1 is discarded). Format T839829468_177 receives the temp name, the
 * C-escaped payload, and the length (0 for a nil NimStringDesc). */
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
TY535238 LOC2;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79));
result0 = gettempname_533596_839829468(m0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = result0;
LOC2[1] = makecstring_191638_155036129(s0);
LOC2[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), LOC2, 3);
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Renders an AST literal node `n0` (with expected type `ty0`) as a C
 * expression rope. Dispatches on the node kind:
 *  - int-like literals (kinds 5..15): further dispatch on the skipped type --
 *    plain int, bool (two canned constants), int64, uint64, or a default
 *    cast form (T839829468_172);
 *  - nil literals (kind 23): closures get a cached per-module data constant
 *    (registered in datacache, emitted once), plain pointers a canned NIM_NIL;
 *  - string literals (kinds 20..22): nil string, a cached NimString constant
 *    (kind 28), or a bare C string (makecstring);
 *  - float literals (kinds 16..18): tostrmaxprecision;
 *  - anything else: internal error.
 * A nil `ty0` is an internal error up front. */
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 5) ... ((Tnodekind292020) 15):
{
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 5):
{
result0 = intliteral_539270_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 1):
{
/* bool: nonzero -> T839829468_169, zero -> T839829468_170 */
{
TY533289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind292244) 35):
{
result0 = int64literal_549430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 44):
{
result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
default:
{
/* other ordinal kinds: cast the int literal to the target C type */
TY532811 LOC19;
Ttype292840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype292840*)0;
LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_535671_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
Ttype292840* t0;
t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
{
/* short-circuit: kind == 25 (proc) && callconv == 8 (closure) */
NIM_BOOL LOC24;
NI id0;
Ropeobj178006* LOC28;
LOC24 = (NIM_BOOL)0;
LOC24 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28);
{
/* id0 == labels means the cache slot is new: emit the constant once */
TY532811 LOC33;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
{
TY533289 LOC40;
/* precedence note: parses as (!strval) == 0, i.e. "strval != NULL" --
   unusual but correct; the nil-string branch below handles NULL */
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype292840* LOC42;
NI id0;
LOC42 = (Ttype292840*)0;
LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY178507 LOC49;
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
/* literal already emitted: reference the cached temp by id */
TY532811 LOC51;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_178401_2381377266(((NI64) (id0)));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
result0 = makecstring_191638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 18):
{
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_178277_2381377266(LOC54);
}
break;
default:
{
/* unexpected node kind: report via internal error, message built as
   T839829468_179 ++ reprEnum(kind) ++ ')' */
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendChar(LOC56, 41);
internalerror_196100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Convenience overload: renders literal `n0` using the node's own type. */
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = genliteral_549476_839829468(p0, n0, (*n0).typ);
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits the "case ..." labels for one branch of a case statement; the last
 * son of `branch0` is the body and is skipped (loop runs to length-2).
 * Range nodes (kind 44): if the target C compiler supports GNU case ranges
 * (Cc_* capability bit 0), emit "case lo ... hi" via format T839829468_164;
 * otherwise expand the range into one "case v:" line (T839829468_180) per
 * value by incrementing a copied node. Plain values emit a single label. */
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) {
NI length0;
length0 = len_293081_850551059(branch0);
{
NI j_547676_839829468;
NI HEX3Atmp_547717_839829468;
NI res_547720_839829468;
j_547676_839829468 = (NI)0;
HEX3Atmp_547717_839829468 = (NI)0;
HEX3Atmp_547717_839829468 = (NI)(length0 - ((NI) 2));
res_547720_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547720_839829468 <= HEX3Atmp_547717_839829468)) goto LA3;
j_547676_839829468 = res_547720_839829468;
{
Tnode292802* LOC6;
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7;
{
/* compiler supports GNU "case a ... b:" ranges */
TY532811 LOC13;
Tnode292802* LOC14;
Tnode292802* LOC15;
Tnode292802* LOC16;
Tnode292802* LOC17;
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode292802*)0;
LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_539273_839829468(p0, LOC15);
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC17 = (Tnode292802*)0;
LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_539273_839829468(p0, LOC17);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
/* no range support: expand to one label per value, mutating a copy */
Tnode292802* v0;
Tnode292802* LOC19;
Tnode292802* LOC20;
LOC19 = (Tnode292802*)0;
LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0));
v0 = copynode_296528_850551059(LOC20);
{
while (1) {
Tnode292802* LOC23;
Tnode292802* LOC24;
TY178507 LOC25;
LOC23 = (Tnode292802*)0;
LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC24 = (Tnode292802*)0;
LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_539273_839829468(p0, v0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
/* single-value case label */
TY178507 LOC27;
Tnode292802* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547676_839829468);
LOC27[0] = genliteral_539273_839829468(p0, LOC28);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_547720_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Recursively walks an object's field AST `n0` and emits GC-traversal code:
 *  - record list (kind 138): recurse into all sons;
 *  - record case (kind 139): emit a C switch on the discriminator field
 *    (T839829468_163), a case-label set per branch via gencaserange (or a
 *    default label), each branch body recursively, then close the switch;
 *  - single field symbol (kind 3): build "accessor.field" (T839829468_90)
 *    and hand off to the type-level traversal (gentraverseproc_537022);
 *  - anything else is an internal error.
 * A nil node returns immediately. */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
{
NI i_537068_839829468;
NI HEX3Atmp_537239_839829468;
NI LOC7;
NI res_537242_839829468;
i_537068_839829468 = (NI)0;
HEX3Atmp_537239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_295351_850551059(n0);
HEX3Atmp_537239_839829468 = (NI)(LOC7 - ((NI) 1));
res_537242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9;
i_537068_839829468 = res_537242_839829468;
gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]);
res_537242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
Tcproc529021* p0;
Tsym292834* disc0;
TY532811 LOC15;
TY533289 LOC28;
{
/* first son must be the discriminator symbol (kind 3) */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
/* branches start at son index 1 */
NI i_537098_839829468;
NI HEX3Atmp_537249_839829468;
NI LOC17;
NI res_537252_839829468;
i_537098_839829468 = (NI)0;
HEX3Atmp_537249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(n0);
HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1));
res_537252_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC26;
TY533289 LOC27;
if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19;
i_537098_839829468 = res_537252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468];
{
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22;
gencaserange_537028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
/* else-branch: emit "default:" (T839829468_181) */
TY533289 LOC25;
memset((void*)LOC25, 0, sizeof(LOC25));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
LOC26 = (Tnode292802*)0;
LOC26 = lastson_295364_850551059(branch0);
gentraverseproc_537039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_537252_839829468 += ((NI) 1);
} LA19: ;
}
}
memset((void*)LOC28, 0, sizeof(LOC28));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* field0;
TY532811 LOC34;
Ropeobj178006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj178006*)0;
LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Formats `frmt0` with `args0` (ropecg resolves #-style codegen symbols),
 * indents the resulting line to the proc's current depth, and appends it to
 * section `s0` of proc `p0`. */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
LOC1 = (Ropeobj178006**)0;
LOC1 = s_529179_3723162438(p0, s0);
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
LOC3 = (Ropeobj178006*)0;
LOC3 = indentline_532656_839829468(p0, LOC2);
add_178482_2381377266(LOC1, LOC3);
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits GC-traversal code for a value of the given type, reachable via the
 * C expression `accessor0`. Dispatches on the unique type's kind:
 *  - wrapper kinds (11/10/8): recurse into the last son;
 *  - arrays (4/16): emit a for-loop over the element count (T839829468_159 /
 *    T839829468_160) using a fresh temp counter, recursing on the element
 *    type with an indexed accessor (T839829468_138);
 *  - objects (17): recurse into each base type through parentobj (which may
 *    prepend a ".Sup" accessor), then traverse the record node if present;
 *  - tuples (18): recurse per positional field (T839829468_185);
 *  - ref/string/seq-like kinds (22/28/24): emit one visitor call using the
 *    closure's visitorfrmt template;
 *  - procs (25): only closures (callconv 8) visit their env pointer
 *    (T839829468_186);
 *  - all other kinds: nothing to traverse.
 * A nil type returns immediately. */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) {
Ttype292840* typ_537302_839829468;
Tcproc529021* p0;
{ {
if (!(typ_537027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468);
p0 = (*c0).p;
switch ((*typ_537302_839829468).kind) {
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = lastson_295377_850551059(typ_537302_839829468);
gentraverseproc_537022_839829468(c0, accessor0, LOC6);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 arraysize0;
Tloc292816 i0;
Ttype292840* LOC8;
TY532811 LOC9;
TY532811 LOC10;
Ropeobj178006* LOC11;
TY533289 LOC12;
arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
LOC8 = (Ttype292840*)0;
LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_178401_2381377266(arraysize0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]);
memset((void*)LOC12, 0, sizeof(LOC12));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
case ((Ttypekind292244) 17):
{
{
NI i_537325_839829468;
NI HEX3Atmp_537384_839829468;
NI LOC15;
NI res_537387_839829468;
i_537325_839829468 = (NI)0;
HEX3Atmp_537384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_295327_850551059(typ_537302_839829468);
HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1));
res_537387_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* x0;
Ropeobj178006* LOC22;
if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17;
i_537325_839829468 = res_537387_839829468;
x0 = (*typ_537302_839829468).sons->data[i_537325_839829468];
{
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA20: ;
LOC22 = (Ropeobj178006*)0;
LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_537022_839829468(c0, LOC22, x0);
res_537387_839829468 += ((NI) 1);
} LA17: ;
}
}
{
if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n);
}
LA25: ;
}
break;
case ((Ttypekind292244) 18):
{
Ttype292840* typ0;
typ0 = getuniquetype_528640_2036603609(typ_537302_839829468);
{
NI i_537363_839829468;
NI HEX3Atmp_537392_839829468;
NI LOC29;
NI res_537395_839829468;
i_537363_839829468 = (NI)0;
HEX3Atmp_537392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_295327_850551059(typ0);
HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1));
res_537395_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC32;
Ropeobj178006* LOC33;
if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31;
i_537363_839829468 = res_537395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468)));
LOC33 = (Ropeobj178006*)0;
LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]);
res_537395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY178507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
case ((Ttypekind292244) 25):
{
{
TY178507 LOC41;
TY178507 LOC42;
if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
default:
{
/* non-GC'd kinds: nothing to emit */
}
break;
}
}BeforeRet: ;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Emits the traversal loop for a seq type: allocates a temp counter, opens
 * a for-loop (T839829468_156) bounded by the seq's length -- the length
 * accessor string differs (T839829468_157 vs _158) depending on gcmd/flag
 * bit 27, presumably checked vs unchecked field access -- then traverses the
 * element type with an indexed accessor (T839829468_187) and closes the loop
 * (T839829468_160). */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) {
Tcproc529021* p0;
Tloc292816 i0;
Ttype292840* LOC1;
TY535238 LOC2;
NimStringDesc* LOC3;
TY532811 LOC11;
Ropeobj178006* LOC12;
TY533289 LOC13;
p0 = (*c0).p;
memset((void*)(&i0), 0, sizeof(i0));
LOC1 = (Ttype292840*)0;
LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
LOC2[2] = rope_178277_2381377266(LOC3);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
memset((void*)LOC13, 0, sizeof(LOC13));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * Top-level generator for a type's GC traversal ("marker") proc: picks a
 * visitor format based on `reason0`, builds the proc header from a fresh
 * temp name (T839829468_146), declares and initializes the object parameter
 * (T839829468_147/_148), then:
 *  - seqs (kind 24): traverse via gentraverseprocseq on the seq body;
 *  - refs to arrays (base kind 4/16): traverse the dereferenced body with
 *    one accessor form (T839829468_188), otherwise another (_189).
 * Finally assembles header + the three proc sections into the full function
 * text (T839829468_190), forward-declares it in section 7, and appends the
 * definition to section 10. Returns the proc's name rope. */
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) {
Ropeobj178006* result0;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* header0;
TY178507 LOC3;
Ropeobj178006* t0;
TY178507 LOC4;
TY178507 LOC5;
Ropeobj178006* generatedproc0;
TY535235 LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
Ropeobj178006** LOC23;
TY178507 LOC24;
result0 = (Ropeobj178006*)0;
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
result0 = gettempname_533596_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason537016) 0):
{
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
/* other reasons leave visitorfrmt nil -- presumably unused for them */
}
break;
}
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
t0 = gettypedesc_535671_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
Ropeobj178006* LOC10;
if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8;
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_537399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
Ttype292840* LOC14;
Ropeobj178006* LOC17;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((*LOC14).kind == ((Ttypekind292244) 4) || (*LOC14).kind == ((Ttypekind292244) 16))) goto LA15;
LOC17 = (Ropeobj178006*)0;
LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj178006* LOC19;
LOC19 = (Ropeobj178006*)0;
LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
/* assemble: header + decl section + init section + body section */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj178006**)0;
LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * RTTI for an array type: generates type info for the element type
 * (sons[1]) and records it as the array's base. */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]);
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1);
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * RTTI for a set type: emits the common aux info, then an extra node
 * (format T839829468_193) recording the set's first ordinal value. */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* tmp0;
TY535238 LOC1;
NI64 LOC2;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
tmp0 = getnimnode_535945_839829468(m0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(typ0);
LOC1[1] = rope_178401_2381377266(LOC2);
LOC1[2] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), LOC1, 3);
}
/* Machine-generated by the Nim C backend -- do not edit by hand.
 * RTTI for an enum type: emits the common aux info, then
 *  - a node-pointer array sized to the member count (T839829468_139);
 *  - for each member, its display name (the symbol name, or the attached
 *    string literal when the member has an `ast`), comma/newline separated;
 *  - "special case" fixups (T839829468_194) for members whose position
 *    differs from their index, setting hasholes0;
 *  - a counter plus a static name array (T839829468_195/_196), the loop
 *    that fills the node table (T839829468_197), the special cases, and
 *    the final enum node (T839829468_198);
 *  - if the enum has holes, an extra flag line (T839829468_199). */
N_NIMCALL(void, genenuminfo_536597_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* nodeptrs0;
NI length0;
TY532811 LOC1;
Ropeobj178006* enumnames0;
Ropeobj178006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj178006* enumarray0;
Ropeobj178006* counter0;
TY178507 LOC24;
TY535238 LOC25;
TY536847 LOC26;
TY535235 LOC27;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
nodeptrs0 = gettempname_533596_839829468(m0);
length0 = sonslen_295351_850551059((*typ0).n);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj178006*)0;
specialcases0 = (Ropeobj178006*)0;
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
NI i_536622_839829468;
NI HEX3Atmp_536860_839829468;
NI res_536863_839829468;
i_536622_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1));
res_536863_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
Ropeobj178006* elemnode0;
if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4;
i_536622_839829468 = res_536863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536622_839829468]).kindU.S4.sym;
elemnode0 = getnimnode_535945_839829468(m0);
{
/* no ast: use the symbol's own name */
Ropeobj178006* LOC9;
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = makecstring_191638_155036129((*(*field0).name).s);
add_178482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
/* has ast: use the attached string value as the display name */
Ropeobj178006* LOC11;
LOC11 = (Ropeobj178006*)0;
LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval);
add_178482_2381377266(&enumnames0, LOC11);
}
LA5: ;
{
/* between members: append "," + platform newline */
NimStringDesc* LOC16;
if (!(i_536622_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_176644_4151366050);
add_178487_2381377266(&enumnames0, LOC16);
}
LA14: ;
{
/* short-circuit: position != index || type flag bit 5 -> special case */
NIM_BOOL LOC19;
TY532811 LOC23;
LOC19 = (NIM_BOOL)0;
LOC19 = !(((*field0).position == i_536622_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position)));
addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_536863_839829468 += ((NI) 1);
} LA4: ;
}
}
enumarray0 = gettempname_533596_839829468(m0);
counter0 = gettempname_533596_839829468(m0);
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_178401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_178401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0);
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_535945_839829468(m0);
LOC27[1] = rope_178401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
{
TY178507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Build the C identifier (as a rope) of the discriminator-check table for
 * case-object discriminator d0 inside objtype.  The owning record is found
 * by walking up the inheritance chain; format string T839829468_201 combines
 * the owning type's unique id with the mangled discriminator name.
 * NOTE(review): m0 is unused here, kept only for signature compatibility. */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) {
  Ttype292840* objtype0;
  TY532811 fmtargs;
  NimStringDesc* mangled0;
  objtype0 = objtype_536060_839829468;
  /* Ascend to the base type (sons[0]) until the record that actually
   * declares d0 is found. */
  while (lookupinrecord_299119_2984716966((*objtype0).n, (*d0).name) == NIM_NIL) {
    objtype0 = (*objtype0).sons->data[((NI) 0)];
  }
  if ((*objtype0).sym == NIM_NIL) {
    internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
  }
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rope_178401_2381377266(((NI64) ((*objtype0).Sup.id)));
  mangled0 = mangle_528847_2036603609((*(*d0).name).s);
  fmtargs[1] = rope_178277_2381377266(mangled0);
  return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), fmtargs, 2);
}
/* Recursively walks an object type's record description n0 and emits the
 * runtime-type-info (TNimNode) initialization code into module section 14;
 * expr0 is the rope naming the node currently being filled.  By all
 * appearances this is Nim-compiler-generated C for genObjectFields in
 * ccgtypes.nim; node kinds: 138 looks like a record list, 139 a record
 * case (variant part), 3 a field symbol — verify against this compiler
 * version's Tnodekind enum. */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
NI L0;
L0 = sonslen_295351_850551059(n0);
{
/* A single son is inlined directly into expr0. */
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
/* Multiple sons: declare a node-pointer array (T839829468_139) in
 * section 12, fill one TNimNode per son, then link the array into
 * expr0 via T839829468_142. */
Ropeobj178006* tmp0;
TY532811 LOC9;
TY535238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
tmp0 = gettempname_533596_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_536127_839829468;
NI HEX3Atmp_536482_839829468;
NI res_536485_839829468;
i_536127_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1));
res_536485_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* tmp20;
TY535238 LOC13;
if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12;
i_536127_839829468 = res_536485_839829468;
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468)));
LOC13[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20);
res_536485_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* Empty record list: emit a node with length but no sons (T839829468_143). */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
case ((Tnodekind292020) 139):
{
/* Record case: son 0 is the discriminator field; the remaining sons are
 * the branches.  A lookup table (named by discriminatortablename) maps
 * each ordinal discriminator value to the TNimNode of its branch. */
Tsym292834* field0;
Ropeobj178006* tmp0;
NI64 L0;
TY536401 LOC18;
TY532811 LOC19;
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0);
L0 = lengthord_320007_3876443242((*field0).typ);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_535671_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_191638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_178401_2381377266(L0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
/* Declare the table itself (L0 + 1 slots: one per ordinal value plus
 * the implicit else slot at index L0) in section 8. */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
NI i_536421_839829468;
NI HEX3Atmp_536499_839829468;
NI LOC21;
NI res_536502_839829468;
i_536421_839829468 = (NI)0;
HEX3Atmp_536499_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_295351_850551059(n0);
HEX3Atmp_536499_839829468 = (NI)(LOC21 - ((NI) 1));
res_536502_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* b0;
Ropeobj178006* tmp20;
Tnode292802* LOC24;
if (!(res_536502_839829468 <= HEX3Atmp_536499_839829468)) goto LA23;
i_536421_839829468 = res_536502_839829468;
b0 = (*n0).kindU.S6.sons->data[i_536421_839829468];
tmp20 = getnimnode_535945_839829468(m0);
LOC24 = (Tnode292802*)0;
LOC24 = lastson_295364_850551059(b0);
genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
case ((Tnodekind292020) 85):
{
/* of-branch (node kind 85, presumably): register every selector
 * value in the table, expanding ranges (node kind 44). */
{
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_295351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_196100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
NI j_536436_839829468;
NI HEX3Atmp_536492_839829468;
NI LOC32;
NI res_536495_839829468;
j_536436_839829468 = (NI)0;
HEX3Atmp_536492_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_295351_850551059(b0);
HEX3Atmp_536492_839829468 = (NI)(LOC32 - ((NI) 2));
res_536495_839829468 = ((NI) 0);
{
while (1) {
if (!(res_536495_839829468 <= HEX3Atmp_536492_839829468)) goto LA34;
j_536436_839829468 = res_536495_839829468;
{
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY535238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_178401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
/* Single ordinal selector value. */
TY535238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]);
LOC45[1] = rope_178401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_536495_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
case ((Tnodekind292020) 88):
{
/* else-branch (node kind 88, presumably): stored at index L0, one past
 * the last ordinal value. */
TY535238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_178401_2381377266(L0);
LOC48[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_536502_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
case ((Tnodekind292020) 3):
{
/* Plain field symbol; bitfields (bitsize != 0) get no RTTI node. */
Tsym292834* field0;
field0 = (*n0).kindU.S4.sym;
{
TY536475 LOC55;
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_535671_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_191638_155036129((*(*field0).name).s);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* Emit the runtime type info for an object type: the TNimType header, the
 * TNimNode field tree (unless the type is an imported C++ type), and the
 * link between the two (T839829468_144); finally every base type in the
 * inheritance chain is tagged with type flag 5. */
N_NIMCALL(void, genobjectinfo_536506_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
  Ropeobj178006* fieldsnode0;
  TY532811 fmtargs;
  Ttype292840* base0;
  /* Plain objects (type kind 17) take the full aux path; otherwise the
   * base variant is used with an empty extra argument. */
  if ((*typ0).kind == ((Ttypekind292244) 17)) {
    gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0);
  } else {
    Ropeobj178006* empty0;
    empty0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
    gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, empty0);
  }
  fieldsnode0 = getnimnode_535945_839829468(m0);
  if (!isimportedcpptype_533476_839829468(typ0)) {
    genobjectfields_536104_839829468(m0, typ0, (*typ0).n, fieldsnode0);
  }
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = name0;
  fmtargs[1] = fieldsnode0;
  addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), fmtargs, 2);
  /* Walk the base chain (sons[0]) and set type flag 5 on each link. */
  base0 = (*typ0).sons->data[((NI) 0)];
  while (!(base0 == NIM_NIL)) {
    base0 = skiptypes_296099_850551059(base0, IL64(211106247215360));
    (*base0).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8));
    base0 = (*base0).sons->data[((NI) 0)];
  }
}
/* Register a user-supplied deepcopy proc s0 with the type info named by
 * result0 (template T839829468_208).  The proc is code-generated first so
 * that its mangled C name ((*s0).loc.r) exists. */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) {
  TY532811 fmtargs;
  genproc_532951_839829468(m0, s0);
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = result0;
  fmtargs[1] = (*s0).loc.r;
  addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtargs, 2);
}
/* Returns a rope referencing the runtime type info object for the given
 * type; on the first request for a unique type the full info is generated
 * into module m0 (or delegated to the owning module, with only an extern
 * declaration emitted here).  The returned name is wrapped using the
 * string constants T839829468_128 / _117 (by all appearances a
 * "(&NTI...)"-style reference).  Apparently Nim-compiler-generated C for
 * genTypeInfo in ccgtypes.nim — verify kind numbers against this
 * compiler version's enums. */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) {
Ropeobj178006* result0;
Ttype292840* origtype0;
Ttype292840* t0;
TY178507 LOC1;
Tsym292834* owner0;
Ttype292840* LOC12;
Ropeobj178006* LOC66;
Ropeobj178006* LOC67;
Ropeobj178006* LOC68;
{ result0 = (Ropeobj178006*)0;
origtype0 = t_535944_839829468;
t0 = getuniquetype_528640_2036603609(t_535944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
{
/* Already generated in this module: just return the wrapped reference. */
NIM_BOOL LOC4;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj178006*)0;
LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178418_2381377266(LOC7, result0);
LOC9 = (Ropeobj178006*)0;
LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
{
/* Unwrap type kind 13 down to its last son (looks like a generic
 * instantiation/alias layer — TODO confirm). */
while (1) {
if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11;
t0 = lastson_295377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype292840*)0;
LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_299123_2984716966((*LOC12).owner);
{
/* Type belongs to a different module: generate the info there and only
 * emit an extern declaration (section 9, T839829468_131) here. */
Tcgen529027* LOC17;
Ropeobj178006* LOC18;
Ropeobj178006* LOC19;
Ropeobj178006* LOC20;
TY532811 LOC21;
NimStringDesc* LOC22;
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
Ropeobj178006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen529027*)0;
LOC17 = bmod_529201_3723162438(owner0);
LOC18 = (Ropeobj178006*)0;
LOC18 = gentypeinfo_535941_839829468(LOC17, t0);
LOC19 = (Ropeobj178006*)0;
LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj178006*)0;
LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0));
LOC21[1] = rope_178277_2381377266(LOC22);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178418_2381377266(LOC23, result0);
LOC25 = (Ropeobj178006*)0;
LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
/* Per-kind generation. */
switch ((*t0).kind) {
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
/* These kinds share one predefined info object (T839829468_132). */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 23):
{
/* Simple scalar-like kinds: base info only, no node tree. */
Ropeobj178006* LOC28;
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28);
}
break;
case ((Ttypekind292244) 59):
{
/* Kind 59 forwards to its underlying type when an AST node is
 * attached; otherwise this is an internal error. */
{
Ttype292840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype292840*)0;
LOC34 = lastson_295377_850551059(t0);
result0 = gentypeinfo_535941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC36, 41);
internalerror_196113_155036129(LOC36);
}
LA30: ;
}
break;
case ((Ttypekind292244) 25):
{
/* Proc type: calling convention 8 (presumably closure) is modeled as a
 * fake closure tuple; any other convention gets plain base info. */
{
Ropeobj178006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype292840* LOC44;
LOC44 = (Ttype292840*)0;
LOC44 = fakeclosuretype_537010_839829468((*t0).owner);
gentupleinfo_536549_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 22):
{
/* Kinds 24/22: aux info plus a GC marker proc when the selected GC
 * mode is >= 4 (T839829468_192 hooks the marker in). */
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
{
Ropeobj178006* markerproc0;
TY532811 LOC50;
if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 20):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
genarrayinfo_537005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 19):
{
gensetinfo_536867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 14):
{
genenuminfo_536597_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 17):
{
genobjectinfo_536506_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleinfo_536549_839829468(m0, t0, result0);
}
break;
default:
{
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC58, 41);
internalerror_196113_155036129(LOC58);
}
break;
}
{
/* Attach a user deepcopy hook, preferring the unique type's over the
 * original (pre-uniquing) type's. */
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
LOC66 = (Ropeobj178006*)0;
LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj178006*)0;
LOC67 = HEX26_178418_2381377266(LOC66, result0);
LOC68 = (Ropeobj178006*)0;
LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* Record endb/frame debug info for local symbol s0 in the current proc:
 * emits a line into proc section 1 (template T839829468_126) and bumps
 * both the proc's max frame length and the current block's frame length.
 * Skipped unless both options in mask 163840 are enabled, and for the
 * two excluded type kinds (27 and 48) after skipping wrapper types. */
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) {
  Ropeobj178006* a0;
  Ttype292840* skipped0;
  TY535235 fmtargs;
  NimStringDesc* normname0;
  if (!(((163840 & (*p0).options) == 163840))) return;
  skipped0 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864));
  if ((*skipped0).kind == ((Ttypekind292244) 27) || (*skipped0).kind == ((Ttypekind292244) 48)) return;
  /* Default: take the address of the location (prefix T839829468_52);
   * params (symbol kind 3) already passed by hidden pointer use the
   * location rope directly. */
  a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
  if ((*s0).kind == ((Tsymkind292435) 3) && ccgintroducedptr_533609_839829468(s0)) {
    a0 = (*s0).loc.r;
  }
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
  normname0 = nsuNormalize((*(*s0).name).s);
  fmtargs[1] = makecstring_191638_155036129(normname0);
  fmtargs[2] = a0;
  fmtargs[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t);
  linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), fmtargs, 4);
  (*p0).maxframelen += ((NI) 1);
  (*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}
/* Emit the C declaration for local variable s0 into the proc's
 * declaration section (section 0), terminated by T839829468_125 and a
 * newline, then record frame debug info for it. */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) {
  Ropeobj178006* decl0;
  decl0 = localvardecl_538532_839829468(p0, s0);
  decl0 = HEX26_178447_2381377266(decl0, ((NimStringDesc*) &T839829468_125));
  decl0 = HEX26_178447_2381377266(decl0, tnl_176644_4151366050);
  line_532690_839829468(p0, ((Tcprocsection529011) 0), decl0);
  localdebuginfo_538449_839829468(p0, s0);
}
/* Default-initialize local variable v0 at its declaration point.  Nothing
 * is emitted when the symbol carries flag bit 12 (a "no default init"
 * marker, presumably — verify against Tsymflag) or when an immediate
 * assignment will overwrite the location anyway. */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) {
  NIM_BOOL noinit0;
  noinit0 = ((((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
  if (!noinit0 && !immediateasgn0) {
    constructloc_538388_839829468(p0, (*v0).loc, NIM_FALSE);
  }
}
/* Fill param0's location descriptor so it acts as the proc's result
 * variable (loc kind 4, storage 2, name from template T839829468_210).
 * When the return type does not map to C type kind 17 and is an
 * "invalid" (by-reference) return type, the location is flagged (loc
 * flag bit 0) and its storage reset to 0 so it travels as a hidden
 * pointer parameter instead. */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) {
  TY533289 noargs;
  Ropeobj178006* resname0;
  Tctypekind529007 retkind0;
  memset((void*)noargs, 0, sizeof(noargs));
  resname0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), noargs, 0);
  fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, resname0, ((Tstorageloc292812) 2));
  retkind0 = mapreturntype_533445_839829468((*param0).typ);
  if (!(retkind0 == ((Tctypekind529007) 17)) && isinvalidreturntype_533548_839829468((*param0).typ)) {
    (*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
    (*param0).loc.s = ((Tstorageloc292812) 0);
  }
}
/* Parameter "assignment" at proc entry: the C parameter already carries
 * the value, so only frame debug info needs to be recorded for it. */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) {
localdebuginfo_538449_839829468(p0, s0);
}
/* For a proc whose type carries flag bit 11 (closure environment,
 * presumably — verify against Ttypeflag): fetch the hidden environment
 * symbol stored as the last son of ast[3], declare it as a local and
 * emit its initialization (template T839829468_212) into proc section 2.
 * Procs without that flag need no setup. */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) {
  Tnode292802* params0;
  Tnode292802* ls0;
  Tsym292834* env0;
  TY532811 fmtargs;
  if (!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) return;
  params0 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3));
  ls0 = lastson_295364_850551059(params0);
  if (!((*ls0).kind == ((Tnodekind292020) 3))) {
    internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
  }
  env0 = (*ls0).kindU.S4.sym;
  assignlocalvar_538614_839829468(p0, env0);
  memset((void*)fmtargs, 0, sizeof(fmtargs));
  fmtargs[0] = rdloc_538188_839829468((*env0).loc);
  fmtargs[1] = gettypedesc_535671_839829468((*p0).module, (*env0).typ);
  linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), fmtargs, 2);
}
/* Returns the GC-frame setup code for the proc (template T839829468_217
 * instantiated with the frame's struct type), or NIL when the proc
 * registered no GC roots (gcframeid == 0). */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) {
  Ropeobj178006* result0;
  result0 = (Ropeobj178006*)0;
  if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
    TY178507 fmtargs;
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = (*p0).gcframetype;
    result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), fmtargs, 1);
  }
  return result0;
}
/* Returns the stack-trace frame setup code for the proc.  When the proc
 * has endb frame slots (maxframelen > 0) the full frame template
 * (T839829468_220) is used, otherwise the slim one (T839829468_221).
 * The cgsym calls only pull the runtime symbols into the module; their
 * returned ropes are deliberately unused. */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) {
  Ropeobj178006* result0;
  result0 = (Ropeobj178006*)0;
  (void) cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
  if (((NI) 0) < (*p0).maxframelen) {
    TY535235 fmtargs;
    (void) cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = procname0;
    fmtargs[1] = filename0;
    fmtargs[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
    fmtargs[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
    result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), fmtargs, 4);
  } else {
    TY532811 fmtargs;
    memset((void*)fmtargs, 0, sizeof(fmtargs));
    fmtargs[0] = procname0;
    fmtargs[1] = filename0;
    result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), fmtargs, 2);
  }
  return result0;
}
/* Format frmt0 with args0 through the code-generator rope formatter and
 * append the result to section s0 of proc p0. */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
  Ropeobj178006** section0;
  Ropeobj178006* formatted0;
  section0 = s_529179_3723162438(p0, s0);
  formatted0 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
  add_178482_2381377266(section0, formatted0);
}
/* Returns the GC-frame teardown code (argument-less template
 * T839829468_225), or NIL when the proc registered no GC roots. */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) {
  Ropeobj178006* result0;
  result0 = (Ropeobj178006*)0;
  if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
    TY533289 noargs;
    memset((void*)noargs, 0, sizeof(noargs));
    result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), noargs, 0);
  }
  return result0;
}
/* Returns the stack-trace frame teardown code (argument-less template
 * T839829468_226), unconditionally. */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) {
  TY533289 noargs;
  memset((void*)noargs, 0, sizeof(noargs));
  return ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), noargs, 0);
}
/* Generates the complete C function for Nim proc prc0 into module m0:
 * creates the proc context, materializes the result variable and its
 * return statement, records debug info for the parameters, sets up the
 * closure environment, generates the body statements, and finally
 * assembles header + declaration/init/body sections (plus optional
 * stack-trace frame and GC-frame bookkeeping) into module section 10.
 * Apparently Nim-compiler-generated C for genProcAux — flag/option bit
 * numbers below are this compiler version's enum positions; verify
 * against the Nim sources before relying on their meaning. */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Tcproc529021* p0;
Ropeobj178006* header0;
Ropeobj178006* returnstmt0;
Tnode292802* LOC51;
Ropeobj178006* generatedproc0;
p0 = newproc_529206_3723162438(prc0, m0);
header0 = genprocheader_535867_839829468(m0, prc0);
returnstmt0 = NIM_NIL;
{
/* Proc has a return type (typ.sons[0] != nil) and symbol flag 9 is
 * clear: materialize the result variable (ast son 7). */
NIM_BOOL LOC3;
Tsym292834* res0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
if (!(LOC3)) goto LA4;
LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
LA4: ;
if (!LOC3) goto LA5;
{
NI LOC9;
LOC9 = (NI)0;
LOC9 = len_293081_850551059((*prc0).ast);
if (!(LOC9 <= ((NI) 7))) goto LA10;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
}
LA10: ;
res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
{
NIM_BOOL LOC14;
TY178507 LOC34;
LOC14 = (NIM_BOOL)0;
LOC14 = isinvalidreturntype_533548_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
if (!!(LOC14)) goto LA15;
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19;
(*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8));
}
LA19: ;
{
/* Optimization: when symbol flag 12 is set, the backend is the C
 * target (or module flag 27 set), and the body is a single
 * "result = expr" (easyresultasgn), declare the result with a
 * direct initializer (template T839829468_123). */
NIM_BOOL LOC23;
NIM_BOOL LOC24;
NIM_BOOL LOC26;
Tnode292802* val0;
Tnode292802* LOC29;
Ropeobj178006* decl0;
Tloc292816 a0;
TY532811 LOC32;
LOC23 = (NIM_BOOL)0;
LOC24 = (NIM_BOOL)0;
LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
if (!(LOC24)) goto LA25;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
LOC24 = LOC26;
LA25: ;
LOC23 = LOC24;
if (!(LOC23)) goto LA28;
LOC29 = (Tnode292802*)0;
LOC29 = getbody_335227_1724185294(prc0);
val0 = easyresultasgn_560191_839829468(LOC29);
LOC23 = !((val0 == NIM_NIL));
LA28: ;
if (!LOC23) goto LA30;
decl0 = localvardecl_538532_839829468(p0, res0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexprsingleuse_539289_839829468(p0, val0, (&a0));
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = decl0;
LOC32[1] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
}
goto LA21;
LA30: ;
{
/* General case: plain local declaration plus default init. */
assignlocalvar_538614_839829468(p0, res0);
initlocalvar_538398_839829468(p0, res0, NIM_FALSE);
}
LA21: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((*res0).loc);
returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
}
goto LA12;
LA15: ;
{
/* Invalid (by-reference) return type: the result travels as a hidden
 * parameter; array-like results (type kind 16) get storage 0. */
fillresult_533865_839829468(res0);
assignparam_538994_839829468(p0, res0);
{
Ttype292840* LOC38;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256));
if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39;
(*res0).loc.s = ((Tstorageloc292812) 0);
}
LA39: ;
}
LA12: ;
}
LA5: ;
{
/* Debug info for every run-time parameter; compile-time-only
 * parameters are skipped. */
NI i_560627_839829468;
NI HEX3Atmp_560743_839829468;
NI LOC42;
NI res_560746_839829468;
i_560627_839829468 = (NI)0;
HEX3Atmp_560743_839829468 = (NI)0;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059((*(*prc0).typ).n);
HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1));
res_560746_839829468 = ((NI) 1);
{
while (1) {
if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44;
i_560627_839829468 = res_560746_839829468;
{
Tsym292834* param0;
param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC48;
LOC48 = (NIM_BOOL)0;
LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC48) goto LA49;
goto LA45;
}
LA49: ;
assignparam_538994_839829468(p0, param0);
} LA45: ;
res_560746_839829468 += ((NI) 1);
} LA44: ;
}
}
closuresetup_560158_839829468(p0, prc0);
LOC51 = (Tnode292802*)0;
LOC51 = getbody_335227_1724185294(prc0);
genstmts_539244_839829468(p0, LOC51);
generatedproc0 = (Ropeobj178006*)0;
{
/* Symbol flag 14 set: prepend compiler-specific attribute T839829468_213
 * when the configured C compiler supports property bit 6 — presumably a
 * naked/noreturn-style attribute; verify against Tinfoccprop. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA58: ;
}
LA54: ;
{
/* Symbol flag 9 set: assemble via template T839829468_215 — header plus
 * the three proc sections, with no frame/GC-frame management. */
TY535235 LOC68;
Ropeobj178006** LOC69;
Ropeobj178006** LOC70;
Ropeobj178006** LOC71;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0);
}
LA66: ;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = header0;
LOC69 = (Ropeobj178006**)0;
LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC68[1] = (*LOC69);
LOC70 = (Ropeobj178006**)0;
LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC68[2] = (*LOC70);
LOC71 = (Ropeobj178006**)0;
LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC68[3] = (*LOC71);
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
}
goto LA60;
LA62: ;
{
/* Normal assembly: opening template T839829468_216, then GC frame,
 * declarations, optional stack-trace frame (option bit 15), optional
 * profiler hook (option bit 19, template T839829468_222), init and
 * body sections, BeforeRet label when needed, teardown, and the
 * return statement. */
TY178507 LOC73;
Ropeobj178006* LOC74;
Ropeobj178006** LOC93;
Ropeobj178006** LOC94;
Ropeobj178006* LOC101;
TY533289 LOC107;
Ropeobj178006* LOC108;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = header0;
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
LOC74 = (Ropeobj178006*)0;
LOC74 = initgcframe_538435_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC74);
{
Ropeobj178006** LOC79;
Ropeobj178006* procname0;
Ropeobj178006* LOC80;
Ropeobj178006* LOC81;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77;
LOC79 = (Ropeobj178006**)0;
LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC79));
procname0 = makecstring_191638_155036129((*(*prc0).name).s);
LOC80 = (Ropeobj178006*)0;
LOC80 = quotedfilename_196818_155036129((*prc0).info);
LOC81 = (Ropeobj178006*)0;
LOC81 = initframe_560140_839829468(p0, procname0, LOC80);
add_178482_2381377266(&generatedproc0, LOC81);
}
goto LA75;
LA77: ;
{
Ropeobj178006** LOC83;
LOC83 = (Ropeobj178006**)0;
LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC83));
}
LA75: ;
{
TY533289 LOC88;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86;
memset((void*)LOC88, 0, sizeof(LOC88));
appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
}
LA86: ;
{
if (!(*p0).beforeretneeded) goto LA91;
add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
}
LA91: ;
LOC93 = (Ropeobj178006**)0;
LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&generatedproc0, (*LOC93));
LOC94 = (Ropeobj178006**)0;
LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&generatedproc0, (*LOC94));
{
TY533289 LOC99;
Ropeobj178006* LOC100;
if (!(*p0).beforeretneeded) goto LA97;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC100 = (Ropeobj178006*)0;
LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
add_178482_2381377266(&generatedproc0, LOC100);
}
LA97: ;
LOC101 = (Ropeobj178006*)0;
LOC101 = deinitgcframe_538441_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC101);
{
Ropeobj178006* LOC106;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104;
LOC106 = (Ropeobj178006*)0;
LOC106 = deinitframe_560150_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC106);
}
LA104: ;
add_178482_2381377266(&generatedproc0, returnstmt0);
memset((void*)LOC107, 0, sizeof(LOC107));
LOC108 = (Ropeobj178006*)0;
LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
add_178482_2381377266(&generatedproc0, LOC108);
}
LA60: ;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
}
/* Map symbol s0 to the code-generator object of its owning module via
 * the global module list (indexed by the module symbol's position).
 * NOTE(review): m0 is unused here, kept only for signature compatibility. */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) {
  Tsym292834* owningmod0;
  owningmod0 = getmodule_299123_2984716966(s0);
  return gmodules_529170_3723162438->data[(*owningmod0).position];
}
/* True when lib0.path is a call-like node (the listed Tnodekind values)
 * with a non-nil type of pointer/proc kind (26 or 25) — i.e. the dynlib
 * path is computed via a getProcAddr-style call rather than given as a
 * string.  Behaviour-identical rewrite of the generated goto chain using
 * C short-circuit evaluation. */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559442_839829468)(Tlib292820* lib0) {
  Tnode292802* n0;
  NIM_BOOL calllike0;
  NIM_BOOL result0;
  n0 = (*lib0).path;
  calllike0 = ((*n0).kind == ((Tnodekind292020) 27) || (*n0).kind == ((Tnodekind292020) 29) || (*n0).kind == ((Tnodekind292020) 30) || (*n0).kind == ((Tnodekind292020) 31) || (*n0).kind == ((Tnodekind292020) 26) || (*n0).kind == ((Tnodekind292020) 28) || (*n0).kind == ((Tnodekind292020) 32));
  result0 = (NIM_BOOL)(calllike0 && !((*n0).typ == NIM_NIL) && ((*(*n0).typ).kind == ((Ttypekind292244) 26) || (*(*n0).typ).kind == ((Ttypekind292244) 25)));
  return result0;
}
/* Nim-generated C (appears to be cgen's initLocExpr -- TODO confirm).
 * Zero-initializes the Tloc result0 (loc kind 0, storage 0) with the
 * expression's type, then code-generates expression e0 into that loc. */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
expr_539248_839829468(p0, e0, result0);
}
/* Nim-generated C (appears to be cgen's loadDynamicLib -- TODO confirm).
 * Emits the C code that loads a dynamic library at program startup, exactly
 * once per lib (guarded by lib0->generated). Two shapes:
 *  - string-literal path (node kinds 20..22): expand library name candidates
 *    and emit a chained load over all of them;
 *  - computed path: evaluate the path expression in a fresh temporary proc
 *    and emit a single load from the resulting expression.
 * Finally asserts that a library handle name was produced. */
N_NIMCALL(void, loaddynamiclib_559480_839829468)(Tcgen529027* m0, Tlib292820* lib0) {
{
Ropeobj178006* tmp0;
TY178507 LOC5;
/* Already generated? Then only the final name != nil check below runs. */
if (!!((*lib0).generated)) goto LA3;
(*lib0).generated = NIM_TRUE;
/* Fresh temp name becomes the C variable holding the library handle. */
tmp0 = gettempname_533596_839829468(m0);
asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
memset((void*)LOC88, 0, sizeof(LOC88));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
{
TY135002* s0;
Ropeobj178006* loadlib0;
TY532811 LOC18;
/* Literal-path branch: node kind in 20..22 means a string literal. */
if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8;
s0 = (TY135002*) newSeq((&NTI135002), 0);
libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval);
loadlib0 = NIM_NIL;
{
NI i_559847_839829468;
NI HEX3Atmp_559902_839829468;
NI res_559905_839829468;
i_559847_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
res_559905_839829468 = ((NI) 0);
{
/* For each candidate library name, append a load attempt; candidates
 * after the first are joined with separator string T839829468_229. */
while (1) {
TY532811 LOC17;
if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12;
i_559847_839829468 = res_559905_839829468;
(*m0).labels += ((NI) 1);
{
if (!(((NI) 0) < i_559847_839829468)) goto LA15;
add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
}
LA15: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]);
appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
res_559905_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = loadlib0;
LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
}
goto LA6;
LA8: ;
{
/* Computed-path branch: evaluate the path expression at init time. */
Tcproc529021* p0;
Tloc292816 dest0;
Ropeobj178006** LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
TY532811 LOC23;
p0 = newproc_529206_3723162438(NIM_NIL, m0);
(*p0).options = ((*p0).options & ~ 163840);
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0));
/* Splice the temp proc's sections 0/1/2 into file sections 9 and 16. */
LOC20 = (Ropeobj178006**)0;
LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20));
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21));
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22));
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = tmp0;
LOC23[1] = rdloc_538188_839829468(dest0);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
}
LA6: ;
}
LA3: ;
{
/* Internal error if no handle name was ever recorded for this lib. */
if (!((*lib0).name == NIM_NIL)) goto LA26;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233));
}
LA26: ;
}
/* Nim-generated C. Produces the C identifier rope for a dynlib-imported
 * symbol: if flag bit 16 is set on the symbol, the symbol's own name is used
 * verbatim; otherwise a name is formatted from the symbol's unique id using
 * format string T839829468_234. */
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) {
TY178507 fmtargs;
if ((((*sym0).flags & (1U << ((NU)(((Tsymflag292184) 16)) & 31U))) != 0)) {
return rope_178277_2381377266((*(*sym0).name).s);
}
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id)));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), fmtargs, 1);
}
/* Nim-generated C (appears to be cgen's symInDynamicLib -- TODO confirm).
 * Emits the runtime lookup for a symbol imported from a dynamic library.
 * Two cases, selected by isgetprocaddr on the lib's path:
 *  - getProcAddr-style: generate a call expression from the path's AST and
 *    route the resulting load either into the init proc, into a numbered
 *    extension-loader slot ("0".."9"), or report an internal error;
 *  - plain dynlib: emit a symbol lookup against the already-loaded handle.
 * In both cases the symbol's loc.r is replaced by a mangled dynlib name and
 * a variable declaration is appended to file section 9. */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
NIM_BOOL iscall0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY532811 LOC43;
lib0 = (*sym0).annex;
iscall0 = isgetprocaddr_559442_839829468(lib0);
extname0 = (*sym0).loc.r;
{
/* Only a plain dynlib needs the library loaded up front. */
if (!!(iscall0)) goto LA3;
loaddynamiclib_559480_839829468(m0, lib0);
}
LA3: ;
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
(*m0).labels += ((NI) 2);
{
Tnode292802* n0;
Tloc292816 a0;
Tnode292802* LOC9;
Ropeobj178006* params0;
Ropeobj178006* LOC10;
Ropeobj178006* load0;
TY535235 LOC17;
NimStringDesc* LOC18;
Tnode292802* last0;
NimStringDesc* idx0;
if (!iscall0) goto LA7;
/* getProcAddr-style: build "callee(arg1, arg2, ...)" from path children
 * 0 .. len-2; the last child selects where the load statement goes. */
n0 = (*lib0).path;
memset((void*)(&a0), 0, sizeof(a0));
LOC9 = (Tnode292802*)0;
LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0));
LOC10 = (Ropeobj178006*)0;
LOC10 = rdloc_538188_839829468(a0);
params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
{
NI i_559964_839829468;
NI HEX3Atmp_560025_839829468;
NI LOC12;
NI res_560028_839829468;
i_559964_839829468 = (NI)0;
HEX3Atmp_560025_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = len_293081_850551059(n0);
HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2));
res_560028_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* LOC15;
Ropeobj178006* LOC16;
if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14;
i_559964_839829468 = res_560028_839829468;
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468);
initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0));
LOC16 = (Ropeobj178006*)0;
LOC16 = rdloc_538188_839829468(a0);
add_178482_2381377266(&params0, LOC16);
add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
res_560028_839829468 += ((NI) 1);
} LA14: ;
}
}
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC17[2] = params0;
LOC18 = (NimStringDesc*)0;
LOC18 = HEX24_178856_2381377266(extname0);
LOC17[3] = makecstring_191638_155036129(LOC18);
load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
last0 = lastson_295364_850551059(n0);
{
/* Node kind 58 wraps the real value in child 1 -- unwrap it. */
if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21;
last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
}
LA21: ;
{
NimStringDesc* LOC27;
/* The last child must be a string literal (node kind 20). */
if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25;
LOC27 = (NimStringDesc*)0;
LOC27 = HEX24_196185_1689653243(T839829468_236);
internalerror_196113_155036129(LOC27);
}
LA25: ;
idx0 = (*last0).kindU.S3.strval;
{
Ropeobj178006** LOC32;
/* Empty index string: load in the module init proc (section 2). */
if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC32, load0);
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC34;
/* Single digit "0".."9": route into the matching extension loader. */
LOC34 = (NIM_BOOL)0;
LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
if (!(LOC34)) goto LA35;
LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
LA35: ;
if (!LOC34) goto LA36;
add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
}
goto LA28;
LA36: ;
{
/* Anything else is an internal error with the offending index string. */
NimStringDesc* LOC39;
LOC39 = (NimStringDesc*)0;
LOC39 = rawNewString(idx0->Sup.len + 13);
appendString(LOC39, ((NimStringDesc*) &T839829468_237));
appendString(LOC39, idx0);
internalerror_196100_155036129((*sym0).info, LOC39);
}
LA28: ;
}
goto LA5;
LA7: ;
{
/* Plain dynlib: look the symbol up in the loaded library's handle. */
TY535235 LOC41;
NimStringDesc* LOC42;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC41[2] = (*lib0).name;
LOC42 = (NimStringDesc*)0;
LOC42 = HEX24_178856_2381377266(extname0);
LOC41[3] = makecstring_191638_155036129(LOC42);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
}
LA5: ;
/* Declare the pointer variable that will hold the looked-up proc. */
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*sym0).loc.r;
LOC43[1] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/* Nim-generated C. Partial form of symindynamiclib: only replaces the
 * symbol's loc.r with the mangled dynlib name and clears the type's sym
 * backlink; no load/lookup code is emitted here. */
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
asgnRefNoCycle((void**) (&(*sym0).loc.r), mangledynlibproc_538816_839829468(sym0));
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/* Nim-generated C (appears to be cgen's genProcNoForward -- TODO confirm).
 * Generates code for a proc symbol without creating a forward entry.
 * Flow: fill the proc's loc, bind its header; if loc flag 7 is set, only
 * reference it as a compilerproc; otherwise emit a prototype and then pick
 * one of: nothing (loc flag 3), inline generation (callconv 5), dynlib
 * import (loc flag 4, routed to the pending module), or normal generation
 * in the pending module (unless sym flag 5 is set). */
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ fillprocloc_539201_839829468(prc0);
useheader_532369_839829468(m0, prc0);
{
Ropeobj178006* LOC5;
/* loc flag 7: the symbol is only pulled in via cgsym; nothing to emit. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s);
goto BeforeRet;
}
LA3: ;
genprocprototype_539254_839829468(m0, prc0);
{
/* loc flag 3: already handled elsewhere; emit nothing further. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
}
goto LA6;
LA8: ;
{
/* Calling convention 5: generate the body here if not yet declared. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
if (!!(LOC15)) goto LA16;
genprocaux_560284_839829468(m0, prc0);
}
LA16: ;
}
goto LA6;
LA11: ;
{
Tcgen529027* q0;
/* loc flag 4: dynlib import; emit into the symbol's pending module if
 * that module exists and hasn't declared it yet, else partial form. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC23;
NIM_BOOL LOC25;
LOC23 = (NIM_BOOL)0;
LOC23 = !((q0 == NIM_NIL));
if (!(LOC23)) goto LA24;
LOC25 = (NIM_BOOL)0;
LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC23 = !(LOC25);
LA24: ;
if (!LOC23) goto LA26;
symindynamiclib_559929_839829468(q0, prc0);
}
goto LA21;
LA26: ;
{
symindynamiclibpartial_560071_839829468(m0, prc0);
}
LA21: ;
}
goto LA6;
LA19: ;
{
Tcgen529027* q0;
/* Default: generate in the pending module unless sym flag 5 is set. */
if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC36;
LOC34 = (NIM_BOOL)0;
LOC34 = !((q0 == NIM_NIL));
if (!(LOC34)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC34 = !(LOC36);
LA35: ;
if (!LOC34) goto LA37;
genprocaux_560284_839829468(q0, prc0);
}
LA37: ;
}
goto LA6;
LA30: ;
LA6: ;
}BeforeRet: ;
}
/* Nim-generated C (appears to be cgen's genProc -- TODO confirm).
 * Top-level entry to generate a proc: skips inactive/flagged symbols,
 * registers forward-declared procs (sym flag 4) for later, otherwise
 * generates the proc now and, for exported symbols (flag test against
 * mask 65600 == 64), mirrors the prototype (and, for callconv 5, the body)
 * into the generated C header module. */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ {
NIM_BOOL LOC3;
NIM_BOOL LOC5;
/* Skip if sym flag 26 is set or the symbol is not activated. */
LOC3 = (NIM_BOOL)0;
LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isactivated_561431_839829468(prc0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
goto BeforeRet;
}
LA6: ;
fillprocloc_539201_839829468(prc0);
{
/* sym flag 4: forward declaration; defer actual generation. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10;
addforwardedproc_532203_839829468(m0, prc0);
}
goto LA8;
LA10: ;
{
genprocnoforward_560906_839829468(m0, prc0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
/* Mirror into the generated header when exported, a header module
 * exists, and the proc's loc flag 3 is not set. */
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = ((65600 & (*prc0).flags) == 64);
if (!(LOC16)) goto LA17;
LOC16 = !((generatedheader_532201_839829468 == NIM_NIL));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA18: ;
if (!LOC15) goto LA19;
genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0);
{
/* Calling convention 5: the header also needs the body itself. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23;
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id);
if (!!(LOC27)) goto LA28;
genprocaux_560284_839829468(generatedheader_532201_839829468, prc0);
}
LA28: ;
}
LA23: ;
}
LA19: ;
}
LA8: ;
}BeforeRet: ;
}
/* Nim-generated C. True when every option bit in mask 71303168 (0x4400000,
 * bits 22 and 26) is set in the global options word -- the "emulate thread
 * variables" configuration test. */
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) {
return ((71303168 & ~ gglobaloptions_169130_2607990831)==0);
}
/* Nim-generated C (appears to be cgen's declareThreadVar -- TODO confirm).
 * Declares a thread-local variable s0. With emulated threadvars, the var is
 * appended (once, tracked by nimtvdeclared) to the global nimtv record and
 * its type recorded in nimtvdeps. Otherwise a real C declaration is emitted
 * into file section 9, prefixed with an extern marker (T839829468_240) when
 * isextern0, plus an extra qualifier (T839829468_241) when the global option
 * bit 22 is set. */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!LOC3) goto LA4;
{
NIM_BOOL LOC8;
TY532811 LOC11;
LOC8 = (NIM_BOOL)0;
/* containsorincl both tests membership and records the id. */
LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id);
if (!!(LOC8)) goto LA9;
nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t);
++nimtvdeps_538674_839829468->Sup.len;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535671_839829468(m0, (*s0).loc.t);
LOC11[1] = (*s0).loc.r;
addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
}
LA9: ;
}
goto LA1;
LA4: ;
{
Ropeobj178006* LOC21;
TY178507 LOC22;
{
if (!isextern0) goto LA15;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
}
LA15: ;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241));
}
LA19: ;
LOC21 = (Ropeobj178006*)0;
LOC21 = gettypedesc_535671_839829468(m0, (*s0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21);
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
}
LA1: ;
}
/* Nim-generated C (appears to be cgen's genVarPrototypeAux -- TODO confirm).
 * Emits an extern declaration for a global variable owned by another module.
 * Skips symbols whose loc flag 3 is set or that this module already declared.
 * Threadvars (sym flag 22) route through declarethreadvar; other vars get
 * "extern" (T839829468_240) plus optional qualifiers selected by loc flag 4
 * and sym flags 8/7, followed by the type and name. */
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* LOC1;
{ useheader_532369_839829468(m0, sym0);
LOC1 = (Ropeobj178006*)0;
LOC1 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC4;
/* Already declared (loc flag 3) or already in declaredthings: done. */
LOC4 = (NIM_BOOL)0;
LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0);
if (LOC4) goto LA5;
LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LA5: ;
if (!LOC4) goto LA6;
goto BeforeRet;
}
LA6: ;
{
/* Only declare if the symbol's owner is a different module. */
if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14;
declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE);
}
goto LA12;
LA14: ;
{
Ropeobj178006* LOC17;
TY178507 LOC30;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
LOC17 = (Ropeobj178006*)0;
LOC17 = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17);
{
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53));
}
LA20: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121));
}
LA24: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122));
}
LA28: ;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = (*sym0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
}
LA12: ;
}
LA10: ;
}BeforeRet: ;
}
/* Nim-generated C. Thin public wrapper over genvarprototypeaux. */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
genvarprototypeaux_544254_839829468(m0, sym0);
}
/* Nim-generated C (appears to be cgen's cgsym -- TODO confirm).
 * Pulls a compilerproc symbol into the current module by name and returns
 * its loc.r rope. Proc-like kinds (12..15) are fully generated, var-like
 * kinds (8,9,11) get a prototype, type kind 7 just forces its typedesc;
 * any other kind is an internal error. An unknown name raises message 68. */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) {
Ropeobj178006* result0;
Tsym292834* sym0;
result0 = (Ropeobj178006*)0;
sym0 = getcompilerproc_338746_3937434831(name0);
{
if (!!((sym0 == NIM_NIL))) goto LA3;
switch ((*sym0).kind) {
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 13):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
genproc_532951_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
genvarprototype_539236_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 7):
{
Ropeobj178006* LOC8;
LOC8 = (Ropeobj178006*)0;
LOC8 = gettypedesc_535671_839829468(m0, (*sym0).typ);
}
break;
default:
{
NimStringDesc* LOC10;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9);
appendString(LOC10, ((NimStringDesc*) &T839829468_243));
appendString(LOC10, name0);
appendString(LOC10, ((NimStringDesc*) &T839829468_244));
appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435)));
internalerror_196113_155036129(LOC10);
}
break;
}
}
goto LA1;
LA3: ;
{
rawmessage_194612_155036129(((Tmsgkind191002) 68), name0);
}
LA1: ;
/* NOTE(review): this deref also runs on the sym0 == NIM_NIL path above;
 * it is only safe if rawmessage does not return on error -- verify. */
result0 = (*sym0).loc.r;
return result0;
}
/* Nim-generated C (appears to be cgen's ropecg format interpreter -- TODO
 * confirm). Expands a cgen format string frmt0 against args0:
 *   $$   -> literal dollar
 *   $#   -> next argument (sequential counter num0)
 *   $<n> -> argument n (1-based); out-of-range is an internal error
 *   $n   -> newline unless option bit 10 is set; $N -> always newline
 *   #ident -> cgsym lookup of the identifier
 *   #$<n>  -> cgsym lookup of the stringified argument n
 * Plain text between markers is copied through verbatim. */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* result0;
NI i0;
NI length0;
NI num0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
length0 = (frmt0 ? frmt0->Sup.len : 0);
result0 = NIM_NIL;
num0 = ((NI) 0);
{
while (1) {
NI start0;
if (!(i0 < length0)) goto LA2;
{
/* '$' escape handling. */
if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
i0 += ((NI) 1);
switch (((NU8)(frmt0->data[i0]))) {
case 36:
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
i0 += ((NI) 1);
}
break;
case 35:
{
i0 += ((NI) 1);
add_178482_2381377266(&result0, args0[num0]);
num0 += ((NI) 1);
}
break;
case 48 ... 57:
{
/* Parse a decimal argument index after '$'. */
NI j0;
j0 = ((NI) 0);
{
while (1) {
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = (length0 <= i0);
if (LOC14) goto LA15;
LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
LA15: ;
if (!LOC14) goto LA16;
goto LA10;
}
LA16: ;
}
} LA10: ;
num0 = j0;
{
NimStringDesc* LOC22;
NimStringDesc* LOC23;
/* Bounds check against the argument array length. */
if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
LOC22 = (NimStringDesc*)0;
LOC23 = (NimStringDesc*)0;
LOC23 = nimIntToStr(j0);
LOC22 = rawNewString(LOC23->Sup.len + 30);
appendString(LOC22, ((NimStringDesc*) &T839829468_20));
appendString(LOC22, LOC23);
internalerror_196113_155036129(LOC22);
}
LA20: ;
add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
}
break;
case 110:
{
{
if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27;
add_178482_2381377266(&result0, rnl_178903_2381377266);
}
LA27: ;
i0 += ((NI) 1);
}
break;
case 78:
{
add_178482_2381377266(&result0, rnl_178903_2381377266);
i0 += ((NI) 1);
}
break;
default:
{
/* Unknown '$' directive: internal error naming the bad char. */
NimStringDesc* LOC31;
LOC31 = (NimStringDesc*)0;
LOC31 = rawNewString(31);
appendString(LOC31, ((NimStringDesc*) &T839829468_20));
appendChar(LOC31, frmt0->data[i0]);
internalerror_196113_155036129(LOC31);
}
break;
}
}
goto LA3;
LA5: ;
{
NIM_BOOL LOC33;
NI j0;
NimStringDesc* ident0;
Ropeobj178006* LOC39;
/* '#' followed by a letter or '_': scan the identifier and cgsym it. */
LOC33 = (NIM_BOOL)0;
LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC33)) goto LA34;
LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
LA34: ;
if (!LOC33) goto LA35;
i0 += ((NI) 1);
j0 = i0;
{
while (1) {
if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
j0 += ((NI) 1);
} LA38: ;
}
ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
i0 = j0;
LOC39 = (Ropeobj178006*)0;
LOC39 = cgsym_532403_839829468(m0, ident0);
add_178482_2381377266(&result0, LOC39);
}
goto LA3;
LA35: ;
{
NIM_BOOL LOC41;
NI j0;
NimStringDesc* LOC47;
Ropeobj178006* LOC48;
/* "#$<digits>": cgsym lookup of the stringified argument. */
LOC41 = (NIM_BOOL)0;
LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC41)) goto LA42;
LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
LA42: ;
if (!LOC41) goto LA43;
i0 += ((NI) 2);
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
} LA46: ;
}
LOC47 = (NimStringDesc*)0;
LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
LOC48 = (Ropeobj178006*)0;
LOC48 = cgsym_532403_839829468(m0, LOC47);
add_178482_2381377266(&result0, LOC48);
}
goto LA3;
LA43: ;
LA3: ;
/* Copy the literal run up to the next '$' or '#'. */
start0 = i0;
{
while (1) {
if (!(i0 < length0)) goto LA50;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
if (!(LOC53)) goto LA54;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
LA54: ;
if (!LOC53) goto LA55;
i0 += ((NI) 1);
}
goto LA51;
LA55: ;
{
goto LA49;
}
LA51: ;
} LA50: ;
} LA49: ;
{
NimStringDesc* LOC62;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
LOC62 = (NimStringDesc*)0;
LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC62);
}
LA60: ;
} LA2: ;
}
return result0;
}
/* Nim-generated C. True when the current module carries symbol flag 27 while
 * the symbol's owning module does not, and the global command is not command
 * kind 2 -- i.e. the symbol "crosses" some per-module compilation boundary
 * (presumably the compiled-as-C++ marker; verify against the Nim source).
 * Rewritten as early returns; getmodule is still only called once the first
 * flag test has passed, matching the original short-circuit order. */
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tsym292834* symmodule;
if (!((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0))) return NIM_FALSE;
symmodule = getmodule_299123_2984716966(sym0);
if ((((*symmodule).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) return NIM_FALSE;
return !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
}
/* Nim-generated C (appears to be cgen's genProcPrototype -- TODO confirm).
 * Emits the C prototype for a proc. loc flag 3 means nothing to do; loc
 * flag 4 (dynlib import from another module) emits a pointer-variable extern
 * via format T839829468_245; otherwise a full header is built once per
 * module (tracked in declaredprotos) with optional prefixes/suffixes chosen
 * by symbol flags 9/14 and C-compiler capability bits, plus an extra prefix
 * when the symbol crosses the cpp boundary. */
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{ useheader_532369_839829468(m0, sym0);
{
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* Dynlib-imported proc: declare the proc-pointer variable instead. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7;
{
NIM_BOOL LOC11;
Tsym292834* LOC12;
NIM_BOOL LOC14;
TY532811 LOC17;
Ropeobj178006* LOC18;
LOC11 = (NIM_BOOL)0;
LOC12 = (Tsym292834*)0;
LOC12 = getmodule_299123_2984716966(sym0);
LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
if (!(LOC11)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC11 = !(LOC14);
LA13: ;
if (!LOC11) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
LOC17[1] = mangledynlibproc_538816_839829468(sym0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18);
}
LA15: ;
}
goto LA5;
LA7: ;
{
NIM_BOOL LOC20;
Ropeobj178006* header0;
TY178507 LOC47;
Ropeobj178006* LOC48;
/* Normal proc: emit the header once per module. */
LOC20 = (NIM_BOOL)0;
LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
if (!!(LOC20)) goto LA21;
header0 = genprocheader_535867_839829468(m0, sym0);
{
NIM_BOOL LOC25;
/* sym flag 14 + compiler capability bit 6: prefix T839829468_213. */
LOC25 = (NIM_BOOL)0;
LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC25)) goto LA26;
LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0);
LA26: ;
if (!LOC25) goto LA27;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA27: ;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5)));
if (!(LOC31)) goto LA32;
LOC31 = crossescppboundary_560754_839829468(m0, sym0);
LA32: ;
if (!LOC31) goto LA33;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0);
}
LA33: ;
{
NIM_BOOL LOC37;
LOC37 = (NIM_BOOL)0;
LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
if (!(LOC37)) goto LA38;
LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA38: ;
if (!LOC37) goto LA39;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
}
LA39: ;
{
NIM_BOOL LOC43;
LOC43 = (NIM_BOOL)0;
LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC43)) goto LA44;
LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA44: ;
if (!LOC43) goto LA45;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
}
LA45: ;
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = header0;
LOC48 = (Ropeobj178006*)0;
LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}
/* Nim-generated C. True when the selected GC mode enum is at least 5 --
 * i.e. a "native" garbage collector is in use for this compilation. */
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
return (gselectedgc_169133_2607990831 >= ((Tgcmode169080) 5));
}
/* Nim-generated C (appears to be cgen's genRefAssign -- TODO confirm).
 * Emits a ref assignment choosing the GC write barrier by destination
 * storage:
 *  - storage 2 (or no native GC): plain assignment (format T839829468_123);
 *  - storage 3 (heap): cycle-aware barrier (T839829468_249) when the dest
 *    type can form a cycle, otherwise the no-cycle barrier (T839829468_250);
 *  - anything else: the generic unsure-assign barrier (T839829468_251). */
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = (dest0.s == ((Tstorageloc292812) 2));
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = usesnativegc_169177_2607990831();
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(dest0);
LOC8[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2);
}
goto LA1;
LA6: ;
{
if (!(dest0.s == ((Tstorageloc292812) 3))) goto LA10;
{
NIM_BOOL LOC14;
TY532811 LOC17;
LOC14 = (NIM_BOOL)0;
LOC14 = canformacycle_320123_3876443242(dest0.t);
if (!LOC14) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2);
}
goto LA12;
LA15: ;
{
TY532811 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2);
}
LA12: ;
}
goto LA1;
LA10: ;
{
TY532811 LOC21;
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = addrloc_538204_839829468(dest0);
LOC21[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2);
}
LA1: ;
}
/* Nim-generated C. Derives a field-access loc from loc a0: keeps a0's
 * storage class, sets loc kind 5 and type t0, and builds the r-value rope
 * "<a0><sep><field0>" where the separator is string constant T839829468_257
 * (presumably "." -- verify). Call order of the rope helpers matches the
 * original. */
N_NIMCALL(void, optasgnloc_549788_839829468)(Tloc292816 a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result) {
Ropeobj178006* baseexpr;
Ropeobj178006* withsep;
(*Result).k = ((Tlockind292808) 5);
(*Result).s = a0.s;
unsureAsgnRef((void**) (&(*Result).t), t0);
baseexpr = rdloc_538188_839829468(a0);
withsep = HEX26_178447_2381377266(baseexpr, ((NimStringDesc*) &T839829468_257));
unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(withsep, field0));
}
/* Nim-generated C (appears to be cgen's genOptAsgnTuple -- TODO confirm).
 * Optimized tuple assignment: adjusts the assignment flags (set bit 0 when
 * the source is storage 1; clear it when the dest type has type flag 6),
 * then assigns each tuple field separately by deriving per-field locs with
 * optasgnloc (field names formatted from the index via T839829468_260). */
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
Tassignmentflag538302Set newflags0;
Ttype292840* t_550053_839829468;
Ttype292840* LOC9;
{
if (!(src0.s == ((Tstorageloc292812) 1))) goto LA3;
newflags0 = (flags0 | 1);
}
goto LA1;
LA3: ;
{
if (!(((*dest0.t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA6;
newflags0 = (flags0 & ~ 1);
}
goto LA1;
LA6: ;
{
newflags0 = flags0;
}
LA1: ;
/* Resolve the canonical tuple type before iterating its fields. */
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(dest0.t, IL64(211106232576256));
t_550053_839829468 = getuniquetype_528640_2036603609(LOC9);
{
NI i_550071_839829468;
NI HEX3Atmp_550077_839829468;
NI LOC11;
NI res_550080_839829468;
i_550071_839829468 = (NI)0;
HEX3Atmp_550077_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = len_295339_850551059(t_550053_839829468);
HEX3Atmp_550077_839829468 = (LOC11 - 1);
res_550080_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* t0;
Ropeobj178006* field0;
TY178507 LOC14;
Tloc292816 LOC15;
Tloc292816 LOC16;
if (!(res_550080_839829468 <= HEX3Atmp_550077_839829468)) goto LA13;
i_550071_839829468 = res_550080_839829468;
t0 = (*t_550053_839829468).sons->data[i_550071_839829468];
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(((NI64) (i_550071_839829468)));
field0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1);
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549788_839829468(dest0, t0, field0, (&LOC15));
memset((void*)(&LOC16), 0, sizeof(LOC16));
optasgnloc_549788_839829468(src0, t0, field0, (&LOC16));
genassignment_539264_839829468(p0, LOC15, LOC16, newflags0);
res_550080_839829468 += ((NI) 1);
} LA13: ;
}
}
}
/* Nim-generated C (appears to be cgen's genGenericAsgn -- TODO confirm).
 * Generic (deep) assignment. When assignment flag 0 is set and the skipped
 * dest type has type flag 6, a shallow copy suffices: memcpy-style format
 * T839829468_261 when the dest is storage 2 or no native GC is active,
 * otherwise the GC-aware shallow format T839829468_262. In all other cases
 * the fully generic RTTI-driven assign (T839829468_263) is emitted. */
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
{
NIM_BOOL LOC10;
NIM_BOOL LOC12;
TY535238 LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = (dest0.s == ((Tstorageloc292812) 2));
if (LOC10) goto LA11;
LOC12 = (NIM_BOOL)0;
LOC12 = usesnativegc_169177_2607990831();
LOC10 = !(LOC12);
LA11: ;
if (!LOC10) goto LA13;
/* Shallow copy needs <string.h>; mark the module as using it. */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = addrloc_538204_839829468(dest0);
LOC15[1] = addrloc_538204_839829468(src0);
LOC15[2] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3);
}
goto LA8;
LA13: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = addrloc_538204_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3);
}
LA8: ;
}
goto LA1;
LA6: ;
{
TY535238 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = addrloc_538204_839829468(src0);
LOC19[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3);
}
LA1: ;
}
/* Nim-generated C. Recursive assignment-cost heuristic over an AST node:
 * node kind 3 costs 1, kind 139 costs 100, kind 138 sums the cost of all
 * its children, and nil or any other kind costs 0. The goto-encoded child
 * loop is rewritten as a counted for-style loop with the same bounds
 * (0 .. len-1). */
N_NIMCALL(NI, asgncomplexity_549750_839829468)(Tnode292802* n0) {
NI total;
NI childcount;
NI idx;
total = ((NI) 0);
if (n0 == NIM_NIL) return total;
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
total = ((NI) 1);
break;
case ((Tnodekind292020) 139):
total = ((NI) 100);
break;
case ((Tnodekind292020) 138):
childcount = len_293081_850551059(n0);
for (idx = ((NI) 0); idx < childcount; idx += ((NI) 1)) {
total += asgncomplexity_549750_839829468((*n0).kindU.S6.sons->data[idx]);
}
break;
default:
break;
}
return total;
}
/* Emit an optimized field-by-field assignment for an object whose layout is
   described by node tree t0: each field symbol becomes one genassignment,
   record lists are walked recursively. */
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0, Tnode292802* t0) {
    if (t0 == NIM_NIL) {
        return;
    }
    /* Adjust the low flag bit for the children: a static-storage source
       (Tstorageloc 1) forces it on; a destination type carrying Ttypeflag
       bit 6 (presumably a "shallow" marker — confirm) forces it off. */
    Tassignmentflag538302Set newflags0;
    if (src0.s == ((Tstorageloc292812) 1)) {
        newflags0 = (flags0 | 1);
    } else if ((((*dest0.t).flags & (1U << ((NU)(((Ttypeflag292431) 6)) & 31U))) != 0)) {
        newflags0 = (flags0 & ~ 1);
    } else {
        newflags0 = flags0;
    }
    switch ((*t0).kind) {
    case ((Tnodekind292020) 3): {
        /* Field symbol: build dest.field / src.field locs and assign them. */
        Tsym292834* field0 = (*t0).kindU.S4.sym;
        Tloc292816 destField;
        memset((void*)(&destField), 0, sizeof(destField));
        optasgnloc_549788_839829468(dest0, (*field0).typ, (*field0).loc.r, (&destField));
        Tloc292816 srcField;
        memset((void*)(&srcField), 0, sizeof(srcField));
        optasgnloc_549788_839829468(src0, (*field0).typ, (*field0).loc.r, (&srcField));
        genassignment_539264_839829468(p0, destField, srcField, newflags0);
        break;
    }
    case ((Tnodekind292020) 138): {
        /* Record list: recurse into every child with the adjusted flags. */
        NI childCount = len_293081_850551059(t0);
        for (NI i = ((NI) 0); i < childCount; i++) {
            genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, (*t0).kindU.S6.sons->data[i]);
        }
        break;
    }
    default:
        break;
    }
}
/* Central assignment emitter: dispatches on the (skipped) destination type
   kind and writes the matching C assignment code into the proc's statement
   section (Tcprocsection 2). The Ttypekind numbers below are from the Nim
   compiler's enum; the per-case guesses are marked as assumptions — confirm
   against compiler/ast.nim's TTypeKind ordering for this Nim version.
   NOTE: this file is Nim-compiler-generated C; labels LA* encode the original
   structured control flow. */
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0, Tassignmentflag538302Set flags0) {
Ttype292840* ty0;
{ {
/* Fast path: a source of type kind 21 (presumably an empty/nil-like type —
   confirm) is assigned with the generic "dest = src" format T_123. */
NIM_BOOL LOC3;
TY532811 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = !((src0.t == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = ((*src0.t).kind == ((Ttypekind292244) 21));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(dest0);
LOC7[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
goto BeforeRet;
}
LA5: ;
/* Strip abstract wrappers from the destination type before dispatch. */
ty0 = skiptypes_296099_850551059(dest0.t, IL64(211106233624832));
switch ((*ty0).kind) {
case ((Ttypekind292244) 22):
/* kind 22: ref-like — a plain (possibly GC-barriered) pointer assignment. */
{
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
break;
case ((Ttypekind292244) 24):
/* kind 24: seq-like (see its use in genconstexpr) — shallow ref assign
   unless a deep copy is required, then a typed generic-assign call (T_252). */
{
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC12)) goto LA13;
LOC12 = !((src0.s == ((Tstorageloc292812) 1)));
LA13: ;
if (!LOC12) goto LA14;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA10;
LA14: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
}
LA10: ;
}
break;
case ((Ttypekind292244) 28):
/* kind 28: string-like — ref assign when no deep copy is needed; otherwise
   choose between copy (T_253), copy-through-temp for param destinations
   (T_254/T_255), or asgn-by-address (T_256). */
{
{
NIM_BOOL LOC21;
LOC21 = (NIM_BOOL)0;
LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC21)) goto LA22;
LOC21 = !((src0.s == ((Tstorageloc292812) 1)));
LA22: ;
if (!LOC21) goto LA23;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA19;
LA23: ;
{
{
NIM_BOOL LOC28;
NIM_BOOL LOC30;
TY532811 LOC33;
LOC28 = (NIM_BOOL)0;
/* Storage loc 2 (presumably stack/heap-unmanaged — confirm) or a
   non-native GC selects the plain copy helper. */
LOC28 = (dest0.s == ((Tstorageloc292812) 2));
if (LOC28) goto LA29;
LOC30 = (NIM_BOOL)0;
LOC30 = usesnativegc_169177_2607990831();
LOC28 = !(LOC30);
LA29: ;
if (!LOC28) goto LA31;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468(dest0);
LOC33[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
}
goto LA26;
LA31: ;
{
/* Storage loc 3 (presumably a var/out parameter — confirm): copy via a
   fresh temp, then commit the temp. */
Tloc292816 tmp0;
TY535238 LOC37;
TY178507 LOC38;
if (!(dest0.s == ((Tstorageloc292812) 3))) goto LA35;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(dest0);
LOC37[1] = rdloc_538188_839829468(src0);
LOC37[2] = rdloc_538188_839829468(tmp0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468(tmp0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
}
goto LA26;
LA35: ;
{
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = addrloc_538204_839829468(dest0);
LOC40[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
}
LA26: ;
}
LA19: ;
}
break;
case ((Ttypekind292244) 25):
/* kind 25: proc type — closures (complex assignment) split into a ref
   assign of the environment field (sub-loc T_258) plus a raw copy (T_259);
   plain procs use the generic assignment T_123. */
{
{
NIM_BOOL LOC44;
Tloc292816 a0;
Ropeobj178006* LOC47;
Tloc292816 LOC48;
Tloc292816 b0;
Ropeobj178006* LOC49;
Tloc292816 LOC50;
TY532811 LOC51;
LOC44 = (NIM_BOOL)0;
LOC44 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC44) goto LA45;
memset((void*)(&a0), 0, sizeof(a0));
LOC47 = (Ropeobj178006*)0;
LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC48), 0, sizeof(LOC48));
optasgnloc_549788_839829468(dest0, dest0.t, LOC47, (&LOC48));
memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
LOC49 = (Ropeobj178006*)0;
LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC50), 0, sizeof(LOC50));
optasgnloc_549788_839829468(src0, dest0.t, LOC49, (&LOC50));
memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
genrefassign_538311_839829468(p0, a0, b0, flags0);
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(dest0);
LOC51[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
}
goto LA42;
LA45: ;
{
TY532811 LOC53;
memset((void*)LOC53, 0, sizeof(LOC53));
LOC53[0] = rdloc_538188_839829468(dest0);
LOC53[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
}
LA42: ;
}
break;
case ((Ttypekind292244) 18):
/* kind 18: tuple — small complex tuples (<= 4 fields) use the optimized
   field-wise assignment; larger ones fall back to the generic deep assign;
   simple tuples use plain T_123. */
{
{
NIM_BOOL LOC57;
LOC57 = (NIM_BOOL)0;
LOC57 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC57) goto LA58;
{
NI LOC62;
LOC62 = (NI)0;
LOC62 = len_295339_850551059(dest0.t);
if (!(LOC62 <= ((NI) 4))) goto LA63;
genoptasgntuple_550001_839829468(p0, dest0, src0, flags0);
}
goto LA60;
LA63: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA60: ;
}
goto LA55;
LA58: ;
{
TY532811 LOC67;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC67[0] = rdloc_538188_839829468(dest0);
LOC67[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
}
LA55: ;
}
break;
case ((Ttypekind292244) 17):
/* kind 17: object — imported C++ types assign with "=", objects with a type
   field use the generic assign, other complex objects may use the optimized
   field-wise path when the field tree is cheap (asgncomplexity <= 4) and the
   object has no inherited part (sons[0] == 0); simple objects use T_123. */
{
{
NIM_BOOL LOC71;
TY532811 LOC74;
LOC71 = (NIM_BOOL)0;
LOC71 = isimportedcpptype_533476_839829468(ty0);
if (!LOC71) goto LA72;
memset((void*)LOC74, 0, sizeof(LOC74));
LOC74[0] = rdloc_538188_839829468(dest0);
LOC74[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
}
goto LA69;
LA72: ;
{
NIM_BOOL LOC76;
LOC76 = (NIM_BOOL)0;
LOC76 = isobjlackingtypefield_533513_839829468(ty0);
if (!!(LOC76)) goto LA77;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA69;
LA77: ;
{
NIM_BOOL LOC80;
LOC80 = (NIM_BOOL)0;
LOC80 = needscomplexassignment_533509_839829468(ty0);
if (!LOC80) goto LA81;
{
NIM_BOOL LOC85;
NI LOC87;
Ropeobj178006* LOC90;
LOC85 = (NIM_BOOL)0;
LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
if (!(LOC85)) goto LA86;
LOC87 = (NI)0;
LOC87 = asgncomplexity_549750_839829468((*ty0).n);
LOC85 = (LOC87 <= ((NI) 4));
LA86: ;
if (!LOC85) goto LA88;
LOC90 = (Ropeobj178006*)0;
LOC90 = gettypedesc_535671_839829468((*p0).module, ty0);
ty0 = getuniquetype_528640_2036603609(ty0);
{
/* Internal invariant: the unique type must carry its field tree. */
NimStringDesc* LOC95;
if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
LOC95 = (NimStringDesc*)0;
LOC95 = HEX24_196185_1689653243(T839829468_264);
internalerror_196113_155036129(LOC95);
}
LA93: ;
genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n);
}
goto LA83;
LA88: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA83: ;
}
goto LA69;
LA81: ;
{
TY532811 LOC98;
memset((void*)LOC98, 0, sizeof(LOC98));
LOC98[0] = rdloc_538188_839829468(dest0);
LOC98[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
}
LA69: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
/* kinds 16/4: array-like — generic deep assign when complex, otherwise a
   memcpy-style format (T_261) using the C type descriptor. */
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC102) goto LA103;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA100;
LA103: ;
{
TY535238 LOC106;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC106, 0, sizeof(LOC106));
LOC106[0] = rdloc_538188_839829468(dest0);
LOC106[1] = rdloc_538188_839829468(src0);
LOC106[2] = gettypedesc_535671_839829468((*p0).module, ty0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
}
LA100: ;
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
/* kinds 27/48: open-array-like — deep assign by address+typeinfo (T_266)
   when complex, otherwise a raw copy format (T_267). */
{
{
NIM_BOOL LOC110;
TY535238 LOC113;
LOC110 = (NIM_BOOL)0;
LOC110 = needscomplexassignment_533509_839829468(dest0.t);
if (!LOC110) goto LA111;
memset((void*)LOC113, 0, sizeof(LOC113));
LOC113[0] = addrloc_538204_839829468(dest0);
LOC113[1] = addrloc_538204_839829468(src0);
LOC113[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
}
goto LA108;
LA111: ;
{
TY532811 LOC115;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC115, 0, sizeof(LOC115));
LOC115[0] = rdloc_538188_839829468(dest0);
LOC115[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
}
LA108: ;
}
break;
case ((Ttypekind292244) 19):
/* kind 19: set — big sets (mapped to C type kind 17, an array) are copied
   with memcpy by byte size (T_268); word-sized sets assign directly. */
{
{
Tctypekind529007 LOC119;
TY535238 LOC122;
NI64 LOC123;
LOC119 = (Tctypekind529007)0;
LOC119 = maptype_533393_839829468(ty0);
if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC122, 0, sizeof(LOC122));
LOC122[0] = rdloc_538188_839829468(dest0);
LOC122[1] = rdloc_538188_839829468(src0);
LOC123 = (NI64)0;
LOC123 = getsize_320135_3876443242(dest0.t);
LOC122[2] = rope_178401_2381377266(LOC123);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
}
goto LA117;
LA120: ;
{
TY532811 LOC125;
memset((void*)LOC125, 0, sizeof(LOC125));
LOC125[0] = rdloc_538188_839829468(dest0);
LOC125[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
}
LA117: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
/* All scalar / pointer-like kinds: plain C "=" assignment (T_123). */
{
TY532811 LOC127;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rdloc_538188_839829468(dest0);
LOC127[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
}
break;
default:
/* Unknown type kind: report an internal error naming the kind. */
{
NimStringDesc* LOC129;
LOC129 = (NimStringDesc*)0;
LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15);
appendString(LOC129, ((NimStringDesc*) &T839829468_269));
appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC129);
}
break;
}
}BeforeRet: ;
}
/* Move loc s0 into destination d0: if d0 is already initialized (kind !=
   locNone/0) emit an assignment whose flags depend on d0's Tlocflag bit 2
   (presumably "no deep copy needed" — confirm); otherwise just copy the loc
   record itself into d0. */
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816 s0) {
    if (!((*d0).k == ((Tlockind292808) 0))) {
        Tassignmentflag538302Set asgnFlags =
            ((((*d0).flags & (1U << ((NU)(((Tlocflag292810) 2)) & 15U))) != 0)) ? 0 : 1;
        genassignment_539264_839829468(p0, (*d0), s0, asgnFlags);
    } else {
        /* Destination unset: adopt the source loc record wholesale. */
        genericAssign((void*)(&(*d0)), (void*)(&s0), (&NTI292816));
    }
}
/* A type is a "simple" constant when (after skipping wrappers) it is not one
   of the aggregate kinds {18,17,16,4,19,24} and not a proc type (kind 25)
   with calling convention 8 (presumably a closure — confirm). */
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) {
    Ttype292840* t0 = skiptypes_296099_850551059(typ0, IL64(211106240964864));
    NIM_BOOL isAggregate =
        ((*t0).kind == ((Ttypekind292244) 18) || (*t0).kind == ((Ttypekind292244) 17) ||
         (*t0).kind == ((Ttypekind292244) 16) || (*t0).kind == ((Ttypekind292244) 4) ||
         (*t0).kind == ((Ttypekind292244) 19) || (*t0).kind == ((Ttypekind292244) 24));
    if (isAggregate) {
        return NIM_FALSE;
    }
    /* Short-circuit preserves the original's callconv access guard. */
    NIM_BOOL isClosureProc = ((*t0).kind == ((Ttypekind292244) 25)) &&
        ((*t0).callconv == ((Tcallingconvention292002) 8));
    return !(isClosureProc);
}
/* Store the C expression rope r0 (of Nim type t0, storage s0) into d0: when
   d0 is initialized, wrap r0 in an expression loc (Tlockind 6) and assign;
   otherwise make d0 itself that expression loc. */
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) {
    if (!((*d0).k == ((Tlockind292808) 0))) {
        Tloc292816 exprLoc;
        memset((void*)(&exprLoc), 0, sizeof(exprLoc));
        initloc_532273_839829468((&exprLoc), ((Tlockind292808) 6), t0, s0);
        exprLoc.r = r0;
        /* Flag bit 2 on d0 selects flags 0 vs 1 exactly as putlocintodest does. */
        Tassignmentflag538302Set asgnFlags =
            ((((*d0).flags & (1U << ((NU)(((Tlocflag292810) 2)) & 15U))) != 0)) ? 0 : 1;
        genassignment_539264_839829468(p0, (*d0), exprLoc, asgnFlags);
    } else {
        /* Destination unset: adopt the expression in place (GC-safe writes). */
        (*d0).k = ((Tlockind292808) 6);
        unsureAsgnRef((void**) (&(*d0).t), t0);
        unsureAsgnRef((void**) (&(*d0).r), r0);
    }
}
/* Pack the first `size0` bytes of bitset s0 into a single 64-bit word,
   little-endian (byte j lands at bit offset j*8); bytes beyond the actual
   bitset length contribute zero. */
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) {
    NI64 word = IL64(0);
    NI avail = (s0 ? s0->Sup.len : 0);
    for (NI j = ((NI) 0); j < size0; j++) {
        if (j < avail) {
            word = (NI64)(word | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[j]))) << (NU64)(((NI64) ((NI)(j * ((NI) 8)))))));
        }
    }
    return word;
}
/* Render the raw data of bitset cs0 as C source: sets larger than 8 bytes
   become an array initializer of hex bytes (formats T_273..T_276 control the
   open brace, separators with a line break every 8 bytes, and the closer);
   word-sized sets become a single integer literal. */
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) {
    Ropeobj178006* result0 = (Ropeobj178006*)0;
    if (((NI) 8) < size0) {
        TY533289 noArgs;
        memset((void*)noArgs, 0, sizeof(noArgs));
        result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), noArgs, 0);
        for (NI i = ((NI) 0); i < size0; i++) {
            NimStringDesc* frmt0;
            if (i < (NI)(size0 - ((NI) 1))) {
                /* Separator format; wrap the line after every 8th byte. */
                if (((NI) ((NI)((NI)(i + ((NI) 1)) % ((NI) 8)))) == ((NI) 0)) {
                    frmt0 = copyString(((NimStringDesc*) &T839829468_274));
                } else {
                    frmt0 = copyString(((NimStringDesc*) &T839829468_275));
                }
            } else {
                /* Final element uses the closing format. */
                frmt0 = copyString(((NimStringDesc*) &T839829468_276));
            }
            TY178507 hexArg;
            memset((void*)hexArg, 0, sizeof(hexArg));
            NimStringDesc* hexByte = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i])), ((NI) 2));
            hexArg[0] = rope_178277_2381377266(hexByte);
            addf_179205_2381377266(&result0, frmt0, hexArg, 1);
        }
    } else {
        /* Small set fits in one machine word: emit it as an integer literal. */
        NI64 packed0 = bitsettoword_549578_839829468(cs0, size0);
        result0 = intliteral_539270_839829468(packed0);
    }
    return result0;
}
/* Format `frmt0` with `args0` via the codegen rope formatter and append the
   result to module section s0. */
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
    Ropeobj178006* formatted = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
    add_178482_2381377266(&(*m0).s[(s0)- 0], formatted);
}
/* Emit a constant seq literal: build the element payload rope (header T_277,
   open T_278, separator T_279, closer T_280 — note the original emits T_280
   once inside the non-empty branch and once unconditionally), declare a temp
   global of the fixed-size payload type (format T_281, file section 8), and
   return the temp cast to the seq type (format T_282). */
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) {
    NI elemCount = len_293081_850551059(n0);
    TY178507 hdrArg;
    memset((void*)hdrArg, 0, sizeof(hdrArg));
    hdrArg[0] = rope_178401_2381377266(((NI64) (elemCount)));
    Ropeobj178006* data0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), hdrArg, 1);
    if (((NI) 0) < elemCount) {
        add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_278));
        for (NI i = ((NI) 0); i < elemCount; i++) {
            if (((NI) 0) < i) {
                /* Separator before every element except the first. */
                TY533289 noArgs;
                memset((void*)noArgs, 0, sizeof(noArgs));
                addf_179205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), noArgs, 0);
            }
            Ropeobj178006* elem = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[i]);
            add_178482_2381377266(&data0, elem);
        }
        add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
    }
    add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
    Ropeobj178006* result0 = gettempname_533596_839829468((*p0).module);
    TY535235 declArgs;
    memset((void*)declArgs, 0, sizeof(declArgs));
    declArgs[0] = gettypedesc_535671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
    declArgs[1] = rope_178401_2381377266(((NI64) (elemCount)));
    declArgs[2] = result0;
    declArgs[3] = data0;
    appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), declArgs, 4);
    TY532811 castArgs;
    memset((void*)castArgs, 0, sizeof(castArgs));
    castArgs[0] = gettypedesc_535671_839829468((*p0).module, t0);
    castArgs[1] = result0;
    result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), castArgs, 2);
    return result0;
}
/* Emit a constant expression, unwrapping a name:value pair node (kind 34 —
   presumably nkExprColonExpr; confirm against Tnodekind) to its value son. */
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    Tnode292802* valueNode = n0;
    if ((*n0).kind == ((Tnodekind292020) 34)) {
        valueNode = (*n0).kindU.S6.sons->data[((NI) 1)];
    }
    return genconstexpr_554849_839829468(p0, valueNode);
}
/* Emit a brace-enclosed constant list: open with T_223, each son except the
   last via format T_283 (value plus separator), the last son bare, close with
   T_160. For nodes of kind 38 (presumably an object constructor whose son 0
   is the type — confirm) son 0 is skipped. */
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    NI length0 = sonslen_295351_850551059(n0);
    NI first = ((NI) (((*n0).kind == ((Tnodekind292020) 38))));
    Ropeobj178006* result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223));
    for (NI i = first; i <= (NI)(length0 - ((NI) 2)); i++) {
        TY178507 arg;
        memset((void*)arg, 0, sizeof(arg));
        arg[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[i]);
        addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), arg, 1);
    }
    if (first < length0) {
        /* Last element is appended without a trailing separator. */
        Ropeobj178006* last = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]);
        add_178482_2381377266(&result0, last);
    }
    TY533289 noArgs;
    memset((void*)noArgs, 0, sizeof(noArgs));
    addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), noArgs, 0);
    return result0;
}
/* Emit C source for a constant expression node, dispatching on node kind:
   conversion-like wrappers recurse into son 1, set literals go through the
   bitset path, aggregate literals split on seq (type kind 24) vs simple
   lists, everything else falls back to ordinary expression generation. */
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    Ropeobj178006* result0 = (Ropeobj178006*)0;
    switch ((*n0).kind) {
    case ((Tnodekind292020) 58):
    case ((Tnodekind292020) 59):
        /* Wrapper node (presumably conversion/hidden conv — confirm): emit son 1. */
        result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
        break;
    case ((Tnodekind292020) 39): {
        /* Set literal: convert to a bitset, then render its raw data. */
        Tbitset339004* cs0 = (Tbitset339004*)0;
        tobitset_340001_452470228(n0, (&cs0));
        NI64 setSize = getsize_320135_3876443242((*n0).typ);
        result0 = genrawsetdata_549629_839829468(cs0, ((NI) (setSize)));
        break;
    }
    case ((Tnodekind292020) 41):
    case ((Tnodekind292020) 37):
    case ((Tnodekind292020) 155):
    case ((Tnodekind292020) 38): {
        /* Aggregate literal: seqs need their own payload layout. */
        Ttype292840* t0 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
        if ((*t0).kind == ((Ttypekind292244) 24)) {
            result0 = genconstseq_559371_839829468(p0, n0, t0);
        } else {
            result0 = genconstsimplelist_559299_839829468(p0, n0);
        }
        break;
    }
    default: {
        /* Anything else: generate it as a normal expression and read its loc. */
        Tloc292816 d0;
        memset((void*)(&d0), 0, sizeof(d0));
        initlocexpr_539283_839829468(p0, n0, (&d0));
        result0 = rdloc_538188_839829468(d0);
        break;
    }
    }
    return result0;
}
/* Ensure the complex constant `sym0` is declared/defined for the requesting
   module: assign it a mangled static loc on first use, emit the definition in
   its owning module, and an extern declaration (plus generated-header mirror
   for exported symbols) in the current module when they differ. */
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) {
    Tcgen529027* m0 = (*p0).module;
    useheader_532369_839829468(m0, sym0);
    if ((*sym0).loc.k == ((Tlockind292808) 0)) {
        /* First request: give the constant a mangled, static-storage loc. */
        Ropeobj178006* mangled = manglename_533205_839829468(sym0);
        fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, mangled, ((Tstorageloc292812) 1));
    }
    if ((((*sym0).loc.flags & (1U << ((NU)(((Tlocflag292810) 3)) & 15U))) != 0)) {
        /* Loc flag bit 3 — presumably "already handled/no codegen"; confirm. */
        return;
    }
    Tcgen529027* q0 = findpendingmodule_532241_839829468(m0, sym0);
    /* containsorincl both tests AND records the id, so short-circuit order
       must match the original: only call it when q0 is non-nil. */
    if (!(q0 == NIM_NIL) && !(containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id))) {
        TY535238 defArgs;
        memset((void*)defArgs, 0, sizeof(defArgs));
        defArgs[0] = gettypedesc_535671_839829468(q0, (*sym0).typ);
        defArgs[1] = (*sym0).loc.r;
        defArgs[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast);
        addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), defArgs, 3);
    }
    if (!(q0 == m0) && !(containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id))) {
        /* Referencing module differs from the owner: emit an extern decl here. */
        TY532811 declArgs;
        memset((void*)declArgs, 0, sizeof(declArgs));
        declArgs[0] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
        declArgs[1] = (*sym0).loc.r;
        Ropeobj178006* headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), declArgs, 2);
        add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
        if (((((*sym0).flags & (1U << ((NU)(((Tsymflag292184) 6)) & 31U))) != 0))
            && !(generatedheader_532201_839829468 == NIM_NIL)) {
            /* Exported symbol: mirror the declaration into the generated header. */
            add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
        }
    }
}
/* Load complex constant `sym0` into destination d0: first make sure its
   definition/declaration has been emitted, then route its loc into d0. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) {
requestconstimpl_539240_839829468(p0, sym0);
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
/* Address of section s0 inside the proc's outermost block (index 0). */
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
    return &(*p0).blocks->data[((NI) 0)].sections[(s0)- 0];
}
/* First access to a thread-local variable inside this proc (emulated TLS
   only): mark the proc and module, then emit the TLS setup boilerplate
   (formats T_286 / T_287) into the proc's first two sections. */
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) {
    if (emulatedthreadvars_532949_839829468() && !(*p0).threadvaraccessed) {
        (*p0).threadvaraccessed = NIM_TRUE;
        /* Codegenflag bit 1 — presumably "uses thread vars"; confirm. */
        (*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8));
        Ropeobj178006** declSec = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0));
        TY533289 noArgsA;
        memset((void*)noArgsA, 0, sizeof(noArgsA));
        addf_179205_2381377266(declSec, ((NimStringDesc*) &T839829468_286), noArgsA, 0);
        Ropeobj178006** initSec = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1));
        TY533289 noArgsB;
        memset((void*)noArgsB, 0, sizeof(noArgsB));
        Ropeobj178006* setup = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), noArgsB, 0);
        add_178482_2381377266(initSec, setup);
    }
}
/* A type is "empty" when it is nil or of kind 62 or 7 (presumably void-like
   kinds — confirm against Ttypekind). */
static N_INLINE(NIM_BOOL, isemptytype_297440_850551059)(Ttype292840* t0) {
    if (t0 == NIM_NIL) {
        return NIM_TRUE;
    }
    return ((*t0).kind == ((Ttypekind292244) 62) || (*t0).kind == ((Ttypekind292244) 7));
}
/* Store the constant-data rope r0 (of type t0) into d0: when d0 is already
   initialized, wrap r0 in a static-storage data loc (Tlockind 8) and assign;
   otherwise make d0 itself that data loc. Mirrors putintodest_550468 but with
   fixed kind 8 / storage 1. */
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) {
    if (!((*d0).k == ((Tlockind292808) 0))) {
        Tloc292816 dataLoc;
        memset((void*)(&dataLoc), 0, sizeof(dataLoc));
        initloc_532273_839829468((&dataLoc), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1));
        dataLoc.r = r0;
        Tassignmentflag538302Set asgnFlags =
            ((((*d0).flags & (1U << ((NU)(((Tlocflag292810) 2)) & 15U))) != 0)) ? 0 : 1;
        genassignment_539264_839829468(p0, (*d0), dataLoc, asgnFlags);
    } else {
        /* Destination unset: adopt the data loc directly (GC-safe writes). */
        (*d0).k = ((Tlockind292808) 8);
        unsureAsgnRef((void**) (&(*d0).t), t0);
        unsureAsgnRef((void**) (&(*d0).r), r0);
    }
}
/* Returns true — and records info0 as the proc's last line info — when the
   line or file differs from the last emitted position; false for a repeat. */
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) {
    NIM_BOOL changed = (NIM_BOOL)0;
    if (!((*p0).lastlineinfo.line == info0.line) ||
        !((*p0).lastlineinfo.fileindex == info0.fileindex)) {
        (*p0).lastlineinfo.line = info0.line;
        (*p0).lastlineinfo.fileindex = info0.fileindex;
        changed = NIM_TRUE;
    }
    return changed;
}
/* Emit line tracking for node t0 into the statement section: optionally the
   original source line as a comment (global option bit 28 — presumably
   "embed listing"; confirm), always a #line directive, and then — depending
   on the proc's debug option masks — a line-update statement (format T_294
   for the 163840 mask, T_295 for the 98304 mask with a valid file index). */
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) {
    NI line0 = safelinenm_532721_839829468((*t0).info);
    if (((gglobaloptions_169130_2607990831 & ((NU64)1 << ((NU)(((Tglobaloption169013) 28)) & 63U))) != 0)) {
        Ropeobj178006** stmtSec = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
        TY533289 noArgs;
        memset((void*)noArgs, 0, sizeof(noArgs));
        Ropeobj178006* prefix = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), noArgs, 0);
        Ropeobj178006* srcLine = sourceline_192068_155036129((*t0).info);
        Ropeobj178006* commented = HEX26_178418_2381377266(prefix, srcLine);
        add_178482_2381377266(stmtSec, HEX26_178418_2381377266(commented, rnl_178903_2381377266));
    }
    {
        Ropeobj178006** stmtSec = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
        NimStringDesc* fullPath = tofullpath_192264_155036129((*t0).info.fileindex);
        genclinedir_532725_839829468(stmtSec, fullPath, line0);
    }
    /* Shared guard: top-level code (prc == nil) or procs without Tsymflag
       bit 9 (presumably "no line tracing" — confirm) are trackable. */
    NIM_BOOL trackable = ((*p0).prc == NIM_NIL) ||
        !((((*(*p0).prc).flags & (1U << ((NU)(((Tsymflag292184) 9)) & 31U))) != 0));
    if (((163840 & (*p0).options) == 163840) && trackable) {
        if (freshlineinfo_532818_839829468(p0, (*t0).info)) {
            TY532811 args;
            memset((void*)args, 0, sizeof(args));
            args[0] = rope_178401_2381377266(((NI64) (line0)));
            NimStringDesc* fname = tofilename_192260_155036129((*t0).info.fileindex);
            args[1] = makecstring_191638_155036129(fname);
            linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), args, 2);
        }
    } else if (((98304 & (*p0).options) == 98304) && trackable
               && (((NI32) 0) <= (*t0).info.fileindex)) {
        if (freshlineinfo_532818_839829468(p0, (*t0).info)) {
            TY532811 args;
            memset((void*)args, 0, sizeof(args));
            args[0] = rope_178401_2381377266(((NI64) (line0)));
            args[1] = quotedfilename_196818_155036129((*t0).info);
            linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), args, 2);
        }
    }
}
/* Bump the proc's label counter and return a fresh label rope (prefix string
   T_296 followed by the counter). */
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) {
    (*p0).labels += ((NI) 1);
    Ropeobj178006* counter = rope_178401_2381377266(((NI64) ((*p0).labels)));
    return HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), counter);
}
/* Emit the definition of label labl0 (format T_299) into the statement
   section, resolving a label previously obtained from getlabel. */
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) {
    TY178507 arg;
    memset((void*)arg, 0, sizeof(arg));
    arg[0] = labl0;
    linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_299), arg, 1);
}
/* Short-circuit `and`/`or`: evaluate operand 1 into a temp, emit a
   conditional jump over operand 2's evaluation (format T_297 for magic 127 —
   presumably mAnd; T_298 otherwise — confirm against Tmagic), evaluate
   operand 2 into the same temp, then move the temp into the destination. */
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
    Tloc292816 tmp0;
    memset((void*)(&tmp0), 0, sizeof(tmp0));
    gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
    (*p0).splitdecls += ((NI) 1);
    expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
    Ropeobj178006* L0 = getlabel_539217_839829468(p0);
    {
        TY532811 jmpArgs;
        memset((void*)jmpArgs, 0, sizeof(jmpArgs));
        jmpArgs[0] = rdloc_538188_839829468(tmp0);
        jmpArgs[1] = L0;
        NimStringDesc* fmt = (m0 == ((Tmagic292524) 127))
            ? ((NimStringDesc*) &T839829468_297)
            : ((NimStringDesc*) &T839829468_298);
        linef_532700_839829468(p0, ((Tcprocsection529011) 2), fmt, jmpArgs, 2);
    }
    expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
    fixlabel_539230_839829468(p0, L0);
    if ((*d0).k == ((Tlockind292808) 0)) {
        /* Destination unset: adopt the temp loc record. */
        genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
    } else {
        genassignment_539264_839829468(p0, (*d0), tmp0, 0);
    }
    (*p0).splitdecls -= ((NI) 1);
}
/* Emit an unchecked unary arithmetic op: evaluate the operand, format it with
   its bit size and C type descriptor through the per-magic template table
   (unarithtab, indexed from magic 99), and route the expression into d0. */
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
    Tloc292816 operand;
    memset((void*)(&operand), 0, sizeof(operand));
    initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
    Ttype292840* ty = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
    TY535238 fmtArgs;
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = rdloc_538188_839829468(operand);
    NI64 byteSize = getsize_320135_3876443242(ty);
    fmtArgs[1] = rope_178401_2381377266((NI64)(byteSize * IL64(8)));
    fmtArgs[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
    Ropeobj178006* code = HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], fmtArgs, 3);
    putintodest_550468_839829468(p0, d0, (*e0).typ, code, ((Tstorageloc292812) 0));
}
/* Generates code for a unary arithmetic operation with an overflow guard.
 * When compile option bit 5 is set, first emits a runtime check (format
 * T839829468_317) against the type's first ordinal value; then renders
 * opr[m0 - 96] with the operand and the size in bits into destination d0.
 * Behaviorally identical restyle of the generated goto ladder. */
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 operand;
Ttype292840* ty;
TY532811 fmtArgs;
NI64 byteSize;
Ropeobj178006* rendered;
memset((void*)(&operand), 0, sizeof(operand));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
ty = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
if (((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0) {
TY532811 chkArgs;
NI64 lowOrd;
memset((void*)chkArgs, 0, sizeof(chkArgs));
chkArgs[0] = rdloc_538188_839829468(operand);
lowOrd = firstord_320001_3876443242(ty);
chkArgs[1] = intliteral_539270_839829468(lowOrd);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), chkArgs, 2);
}
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(operand);
byteSize = getsize_320135_3876443242(ty);
fmtArgs[1] = rope_178401_2381377266((NI64)(byteSize * IL64(8)));
rendered = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], fmtArgs, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Generates code for an unchecked binary arithmetic operation.
 * The rendered format binarithtab[op0 - 52] receives both operands, the
 * larger operand size in bits, and a simple type descriptor; the result
 * rope is stored into destination d0. */
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 lhs;
Tloc292816 rhs;
NI64 sizeA;
NI64 sizeB;
NI64 bits;
TY535235 fmtArgs;
Ropeobj178006* rendered;
memset((void*)(&lhs), 0, sizeof(lhs));
memset((void*)(&rhs), 0, sizeof(rhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
sizeA = getsize_320135_3876443242(lhs.t);
sizeB = getsize_320135_3876443242(rhs.t);
/* max(sizeA, sizeB) bytes converted to bits */
bits = (NI64)(((sizeA >= sizeB) ? sizeA : sizeB) * IL64(8));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(lhs);
fmtArgs[1] = rdloc_538188_839829468(rhs);
fmtArgs[2] = rope_178401_2381377266(bits);
fmtArgs[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
rendered = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], fmtArgs, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Generates code for a binary floating-point arithmetic operation.
 * Machine-generated Nim C backend code; only comments were added.
 * When any of option bits masked by 384 is set, renders format
 * T839829468_319 with the operator text opr[m0 - 52], both operands and a
 * type descriptor, then optionally emits NaN (option bit 7, T839829468_323)
 * and Inf (option bit 8, T839829468_324) runtime checks on the result.
 * Otherwise falls back to the generic binaryarith generator. */
N_NIMCALL(void, binaryfloatarith_556728_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
{
Tloc292816 a0;
Tloc292816 b0;
TY535235 LOC5;
Tnode292802* LOC6;
Ropeobj178006* LOC7;
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(opr_556762_839829468[(m0)- 52]);
LOC5[1] = rdloc_538188_839829468(a0);
LOC5[2] = rdloc_538188_839829468(b0);
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj178006*)0;
LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
{
TY178507 LOC12;
/* option bit 7: emit a post-operation check on d0 (format T_323) */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((*d0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY178507 LOC17;
/* option bit 8: emit a second post-operation check on d0 (format T_324) */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((*d0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
/* no float-check options active: plain arithmetic path */
binaryarith_551819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
/* Generates code comparing two proc values for equality.
 * Machine-generated Nim C backend code; only comments were added.
 * If the left operand's skipped type has calling convention 8 (closure-like;
 * TODO confirm), uses the two-field comparison format T839829468_352,
 * otherwise the plain pointer comparison format T839829468_341. */
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC3;
TY532811 LOC6;
Ropeobj178006* LOC7;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(a0);
LOC6[1] = rdloc_538188_839829468(b0);
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
}
goto LA1;
LA4: ;
{
TY532811 LOC9;
Ropeobj178006* LOC10;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rdloc_538188_839829468(a0);
LOC9[1] = rdloc_538188_839829468(b0);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* Renders location a0 as a rope; when the skipped type kind is 2
 * (char-like; TODO confirm against Ttypekind), wraps the rendering in
 * format string T839829468_358 (presumably a cast). */
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816 a0) {
Ropeobj178006* result0;
Ttype292840* skipped;
result0 = rdloc_538188_839829468(a0);
skipped = skiptypes_296099_850551059(a0.t, IL64(211106233624832));
if ((*skipped).kind == ((Ttypekind292244) 2)) {
TY178507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), fmtArgs, 1);
}
return result0;
}
/* Emits the raw code for an overflow-checked binary arithmetic operation
 * and returns the rope naming the temporary that holds the result.
 * Machine-generated Nim C backend code; only comments were added.
 * Declares a temporary of either the default storage type (T839829468_36,
 * when the type is smaller than the platform int) or the exact C type,
 * applies frmt0 to (temp, a, b), and for small or range-restricted types
 * additionally emits a range check (format T839829468_359) against the
 * type's first/last ordinal values. */
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816 a0, Tloc292816 b0, NimStringDesc* frmt0) {
Ropeobj178006* result0;
NI64 size0;
Ropeobj178006* storage0;
TY532811 LOC6;
TY535238 LOC7;
result0 = (Ropeobj178006*)0;
size0 = getsize_320135_3876443242(t0);
{
if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3;
storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_535671_839829468((*p0).module, t0);
}
LA1: ;
result0 = gettempname_533596_839829468((*p0).module);
/* declare the temporary in the proc's declaration section (section 0) */
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
/* apply the caller-provided checked-operation format to (temp, a, b);
 * rdcharloc wraps char-typed operands appropriately */
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_538227_839829468(a0);
LOC7[2] = rdcharloc_538227_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY535238 LOC14;
NI64 LOC15;
NI64 LOC16;
/* range check needed if: size < int size OR type kind is 20 or 14 */
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_176641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((*t0).kind == ((Ttypekind292244) 20) || (*t0).kind == ((Ttypekind292244) 14));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_320001_3876443242(t0);
LOC14[1] = intliteral_539270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_320004_3876443242(t0);
LOC14[2] = intliteral_539270_839829468(LOC16);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
/* Generates code for an overflow-checked binary arithmetic operation.
 * Machine-generated Nim C backend code; only comments were added.
 * With overflow checks disabled (option bit 5 clear), renders the plain
 * format opr[m0 - 45]; otherwise delegates to binaryarithoverflowraw with
 * the 64-bit (prc64, type kind 35) or default (prc) helper format, then
 * casts the raw result via format T839829468_370 into destination d0. */
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj178006* res0;
TY535238 LOC5;
/* overflow checks OFF: plain unchecked operation */
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC5[1] = rdloc_538188_839829468(a0);
LOC5[2] = rdloc_538188_839829468(b0);
res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj178006* res0;
NimStringDesc* LOC7;
TY532811 LOC13;
Ropeobj178006* LOC14;
LOC7 = (NimStringDesc*)0;
{
/* type kind 35: use the 64-bit helper table */
if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10;
LOC7 = copyString(prc64_551274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_551269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, t0, a0, b0, LOC7);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj178006*)0;
LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0));
}
LA1: ;
}
/* Returns the name of the length field as a rope: T839829468_157 when the
 * compiler runs in compatibility mode (command == 2 or module flag bit 27
 * set), otherwise T839829468_158. Behaviorally identical restyle of the
 * generated short-circuit OR ladder. */
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) {
NimStringDesc* fieldName;
NIM_BOOL compatMode;
compatMode = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (!compatMode) {
compatMode = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
if (compatMode) {
fieldName = copyString(((NimStringDesc*) &T839829468_157));
} else {
fieldName = copyString(((NimStringDesc*) &T839829468_158));
}
return rope_178277_2381377266(fieldName);
}
/* When the selected GC mode is 0, reports message kind 263 for node n0,
 * including its rendered source tree. Restyled from the generated goto
 * guard; behavior identical. */
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) {
if (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0)) {
NimStringDesc* rendered;
rendered = rendertree_311044_382274130(n0, 0);
message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), rendered);
}
}
/* Generates code for the `repr` operation: dispatches on the operand's
 * skipped type kind and renders the matching runtime-helper format string
 * (T839829468_371 .. T839829468_384) into destination d0.
 * Machine-generated Nim C backend code; only comments were added.
 * NOTE(review): the numeric Ttypekind values are project enum constants not
 * visible here; case meanings below are structural, not semantic claims. */
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
/* kinds 31..35 and 40..44: single-argument helper T_371 */
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35):
case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44):
{
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468(a0);
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
TY178507 LOC5;
Ropeobj178006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
LOC6 = (Ropeobj178006*)0;
LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
case ((Ttypekind292244) 1):
{
TY178507 LOC8;
Ropeobj178006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC9 = (Ropeobj178006*)0;
LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
case ((Ttypekind292244) 2):
{
TY178507 LOC11;
Ropeobj178006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468(a0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
/* kinds 14/15: helper also receives a runtime type-info node */
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 15):
{
TY532811 LOC14;
Ropeobj178006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(a0);
LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC15 = (Ropeobj178006*)0;
LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
case ((Ttypekind292244) 28):
{
TY178507 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468(a0);
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
/* kind 19: helper receives the ADDRESS of the location plus type info */
case ((Ttypekind292244) 19):
{
TY532811 LOC20;
Ropeobj178006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_538204_839829468(a0);
LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC21 = (Ropeobj178006*)0;
LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
/* kinds 27/48: open-array-like; first normalize the operand into a
 * (pointer, length) pair b0 based on the CONCRETE kind of a0.t, then
 * call the element-wise helper T_382 with the element's type info */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Tloc292816 b0;
TY532811 LOC34;
Ttype292840* LOC35;
Ropeobj178006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
switch ((*a0.t).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC24;
Ropeobj178006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_538188_839829468(a0);
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC27;
Ropeobj178006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_538188_839829468(a0);
LOC27[1] = lenfield_539305_839829468(p0);
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC30;
NI64 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_538188_839829468(a0);
LOC31 = (NI64)0;
LOC31 = lengthord_320007_3876443242(a0.t);
LOC30[1] = rope_178401_2381377266(LOC31);
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468(b0);
LOC35 = (Ttype292840*)0;
LOC35 = elemtype_320394_3876443242(t0);
LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj178006*)0;
LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 5):
case ((Ttypekind292244) 24):
{
TY532811 LOC38;
Ropeobj178006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468(a0);
LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC39 = (Ropeobj178006*)0;
LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
/* kinds 3/62: repr not supported -- user-facing error T_384 */
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
default:
{
TY532811 LOC42;
Ropeobj178006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_538204_839829468(a0);
LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC43 = (Ropeobj178006*)0;
LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_554439_839829468(e0);
}
/* Generates code that yields the runtime type-info object for the operand's
 * (skipped) type and stores the rendering into destination d0. */
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* operandType;
Ropeobj178006* typeInfo;
operandType = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
typeInfo = gentypeinfo_535941_839829468((*p0).module, operandType);
putintodest_550468_839829468(p0, d0, (*e0).typ, typeInfo, ((Tstorageloc292812) 0));
}
/* Generates code swapping two locations via a temporary:
 *   tmp = a; a = b; b = tmp.
 * d0 is unused (swap produces no value), matching the original. */
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 lhs;
Tloc292816 rhs;
Tloc292816 scratch;
Ttype292840* elemTy;
memset((void*)(&lhs), 0, sizeof(lhs));
memset((void*)(&rhs), 0, sizeof(rhs));
memset((void*)(&scratch), 0, sizeof(scratch));
elemTy = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
gettemp_537032_839829468(p0, elemTy, (&scratch), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
genassignment_539264_839829468(p0, scratch, lhs, 0);
genassignment_539264_839829468(p0, lhs, rhs, 0);
genassignment_539264_839829468(p0, rhs, scratch, 0);
}
/* Generic one-operand expression generator: evaluates son 1, applies the
 * caller-supplied format string frmt0 to its rendering, and stores the
 * result rope into destination d0. */
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 operand;
TY178507 fmtArgs;
Ropeobj178006* rendered;
memset((void*)(&operand), 0, sizeof(operand));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&operand));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(operand);
rendered = ropecg_532407_839829468((*p0).module, frmt0, fmtArgs, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Generic two-operand statement generator: requires an empty destination
 * (d0.k == 0; anything else is an internal error T839829468_387), evaluates
 * both sons, and emits frmt0 applied to the pair as a statement. Restyled
 * from the generated goto guard; behavior identical. */
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 lhs;
Tloc292816 rhs;
TY532811 fmtArgs;
memset((void*)(&lhs), 0, sizeof(lhs));
memset((void*)(&rhs), 0, sizeof(rhs));
if ((*d0).k != ((Tlockind292808) 0)) {
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
}
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(lhs);
fmtArgs[1] = rdloc_538188_839829468(rhs);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, fmtArgs, 2);
}
/* Generates code for string concatenation `a & b & ...` (operands are
 * sons 1..len-1). Machine-generated Nim C backend code; comments added.
 * Strategy: allocate a temporary; walk the operands accumulating a static
 * length L0 (1 per char operand, literal length for string literals) and a
 * rope `lens0` of dynamic-length terms; emit one allocation (T839829468_393)
 * sized by L0 + lens0, then the queued per-operand append statements; and
 * finally move the temporary into destination d0. */
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 tmp0;
NI L0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
/* iterate i over 0 .. sonslen(e0) - 2; operand node is son[i + 1] */
NI i_554475_839829468;
NI HEX3Atmp_554547_839829468;
NI LOC2;
NI res_554550_839829468;
i_554475_839829468 = (NI)0;
HEX3Atmp_554547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2));
res_554550_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4;
i_554475_839829468 = res_554550_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
/* char operand (type kind 2): contributes exactly 1 to the static
 * length and uses the char-append format T_390 */
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_538188_839829468(a0);
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* literal node (kinds 20..22): its length is known statically */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* non-literal string: add a runtime `x->len` term to lens0 */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_538188_839829468(a0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554550_839829468 += ((NI) 1);
} LA4: ;
}
}
/* emit the single sized allocation, then flush the queued appends */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA23;
LA25: ;
{
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA23: ;
gcusage_554439_839829468(e0);
}
/* Generates code for in-place string append `dest.add(...)` (destination is
 * son 1, appended operands are sons 2..len-1). Machine-generated Nim C
 * backend code; only comments were added.
 * Same accumulation scheme as genstrconcat: static length L0 plus dynamic
 * length terms lens0, one resize call (format T839829468_395), then the
 * queued per-operand append statements. d0 is unused (append is a
 * statement), matching the generated signature. */
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 dest0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
NI L0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj178006*)0;
lens0 = (Ropeobj178006*)0;
L0 = ((NI) 0);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
/* iterate i over 0 .. sonslen(e0) - 3; operand node is son[i + 2] */
NI i_554615_839829468;
NI HEX3Atmp_554676_839829468;
NI LOC2;
NI res_554679_839829468;
i_554615_839829468 = (NI)0;
HEX3Atmp_554676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3));
res_554679_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4;
i_554615_839829468 = res_554679_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
/* char operand (type kind 2): static length +1, char-append T_390 */
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468(dest0);
LOC10[1] = rdloc_538188_839829468(a0);
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
/* literal node (kinds 20..22): static length known at compile time */
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
/* non-literal string: runtime length term */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(a0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554679_839829468 += ((NI) 1);
} LA4: ;
}
}
/* one resize of the destination, then flush the queued appends */
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(dest0);
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
gcusage_554439_839829468(e0);
}
/* Generates code appending a single element to a sequence (seq is son 1,
 * element is son 2). Machine-generated Nim C backend code; comments added.
 * Chooses between two grow-call formats (T839829468_396 in compatibility
 * mode -- command == 2 or module flag bit 27 -- else T839829468_397), emits
 * the grow, assigns the element into the slot at the current length
 * (T839829468_398), then increments the length field (T839829468_399).
 * d0 is unused (append is a statement), matching the generated signature. */
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* seqappendpattern0;
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 dest0;
Ttype292840* bt0;
TY535238 LOC8;
Ttype292840* LOC9;
TY532811 LOC10;
TY532811 LOC11;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
/* emit the grow call with the seq and both C type descriptors */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_535671_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_535671_839829468((*p0).module, bt0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3);
/* build a synthetic loc for the new slot `seq->data[seq->len]` and
 * assign the element into it */
initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468(a0);
LOC10[1] = lenfield_539305_839829468(p0);
dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_539264_839829468(p0, dest0, b0, 3);
/* bump the length field */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468(a0);
LOC11[1] = lenfield_539305_839829468(p0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_554439_839829468(e0);
}
/* Generic two-operand expression generator: evaluates sons 1 and 2, applies
 * the caller-supplied format string frmt0 to their renderings, and stores
 * the result rope into destination d0. */
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 lhs;
Tloc292816 rhs;
TY532811 fmtArgs;
Ropeobj178006* rendered;
memset((void*)(&lhs), 0, sizeof(lhs));
memset((void*)(&rhs), 0, sizeof(rhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&lhs));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&rhs));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(lhs);
fmtArgs[1] = rdloc_538188_839829468(rhs);
rendered = ropecg_532407_839829468((*p0).module, frmt0, fmtArgs, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, rendered, ((Tstorageloc292812) 0));
}
/* Generates code for string equality with three fast paths.
 * Machine-generated Nim C backend code; only comments were added.
 * 1) either side is a nil-like node (kind 23): pointer comparison T_341;
 * 2) either side is an empty string literal (kinds 20..22 with len 0):
 *    compare the other side's length against zero (format T_400);
 * 3) otherwise: full string comparison via format T_401. */
N_NIMCALL(void, genstrequals_556666_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 x0;
Tnode292802* a0;
Tnode292802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind292020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind292020) 23));
LA4: ;
if (!LOC3) goto LA5;
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
TY532811 LOC12;
Ropeobj178006* LOC13;
/* left side is an empty string literal: test right side's length only */
LOC8 = (NIM_BOOL)0;
LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22));
if (!(LOC8)) goto LA9;
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(x0);
LOC12[1] = lenfield_539305_839829468(p0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0));
}
goto LA1;
LA10: ;
{
NIM_BOOL LOC15;
TY532811 LOC19;
Ropeobj178006* LOC20;
/* symmetric case: right side is an empty string literal */
LOC15 = (NIM_BOOL)0;
LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22));
if (!(LOC15)) goto LA16;
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(x0);
LOC19[1] = lenfield_539305_839829468(p0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0));
}
goto LA1;
LA17: ;
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* Generates code for `isNil`. Proc types (kind 25) with calling convention 8
 * (closure-like; TODO confirm) use the two-field test format T839829468_404;
 * every other pointer-like type uses the plain test T839829468_405.
 * Restyled from the generated short-circuit AND ladder; behavior identical. */
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* ty;
ty = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
if ((*ty).kind == ((Ttypekind292244) 25) && (*ty).callconv == ((Tcallingconvention292002) 8)) {
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
} else {
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
}
/* Generates code for a `$`-style stringify call: evaluates son 1, rewrites
 * its rendering through format frmt0, allocates a destination temporary if
 * d0 is still empty (k == 0), and assigns the result into d0. */
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 operand;
TY178507 fmtArgs;
memset((void*)(&operand), 0, sizeof(operand));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&operand));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rdloc_538188_839829468(operand);
/* the operand loc is reused: its rope is replaced by the formatted call */
operand.r = ropecg_532407_839829468((*p0).module, frmt0, fmtArgs, 1);
if ((*d0).k == ((Tlockind292808) 0)) {
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
genassignment_539264_839829468(p0, (*d0), operand, 0);
gcusage_554439_839829468(n0);
}
/* Builds and returns the rope for an `of` type test of rendering a0 against
 * type dest0. Machine-generated Nim C backend code; only comments were added.
 * Fast path (format T839829468_414) when the destination type has flag bit 2,
 * or when the module has codegen flag bit 5 and the type lacks flag bit 5;
 * otherwise emits a per-site cache variable (declared via T839829468_416 in
 * file section 9) and uses the cached-check format T839829468_417. */
N_NIMCALL(Ropeobj178006*, genofhelper_555139_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) {
Ropeobj178006* result0;
Ropeobj178006* ti0;
result0 = (Ropeobj178006*)0;
ti0 = gentypeinfo_535941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC9;
/* condition: typeflag bit 2 OR (codegen flag bit 5 AND NOT typeflag 5) */
LOC3 = (NIM_BOOL)0;
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = a0;
LOC9[1] = ti0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
{
Ropeobj178006* LOC11;
Ropeobj178006* cache0;
Ropeobj178006* LOC12;
TY178507 LOC13;
TY535238 LOC14;
/* pull in the runtime symbol T839829468_129 needed by the cached check */
LOC11 = (Ropeobj178006*)0;
LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
/* fresh module-unique label number -> cache variable name T_415<n> */
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj178006*)0;
LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): looks like the code generator for Nim's `x of T` expression
 * -- confirm against compiler/ccgexprs.nim.
 * Walks through ref/ptr/var indirections of x0's type (kinds 21/22/23),
 * remembering a pointer value for a nil check and dereferencing the rope as
 * needed; unless compiling for cmd 2 or a module flag is set, additionally
 * descends object inheritance (kind 17) appending a `.Sup`-style accessor
 * (template T..._153); errors out via globalerror if the object lacks a type
 * field; finally wraps the check in genofhelper (with or without a nil
 * guard) and stores a bool-typed result into d0. */
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) {
 Tloc292816 a0;
 Ttype292840* dest0;
 Ropeobj178006* r0;
 Ropeobj178006* nilcheck0;
 Ttype292840* t0;
 Ttype292840* LOC41;
 memset((void*)(&a0), 0, sizeof(a0));
 initlocexpr_539283_839829468(p0, x0, (&a0));
 dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320));
 r0 = rdloc_538188_839829468(a0);
 nilcheck0 = NIM_NIL;
 t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
 {
  /* strip ref/ptr/var layers, dereferencing r0 where required */
  while (1) {
   Ttype292840* LOC16;
   if (!((*t0).kind == ((Ttypekind292244) 23) || (*t0).kind == ((Ttypekind292244) 21) || (*t0).kind == ((Ttypekind292244) 22))) goto LA2;
   {
    if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5;
    nilcheck0 = r0;
   }
   LA5: ;
   {
    NIM_BOOL LOC9;
    NIM_BOOL LOC11;
    TY178507 LOC15;
    LOC9 = (NIM_BOOL)0;
    LOC9 = !(((*t0).kind == ((Ttypekind292244) 23)));
    if (LOC9) goto LA10;
    LOC11 = (NIM_BOOL)0;
    LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
    if (LOC11) goto LA12;
    LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
    LA12: ;
    LOC9 = !(LOC11);
    LA10: ;
    if (!LOC9) goto LA13;
    memset((void*)LOC15, 0, sizeof(LOC15));
    LOC15[0] = r0;
    r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
   }
   LA13: ;
   LOC16 = (Ttype292840*)0;
   LOC16 = lastson_295377_850551059(t0);
   t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256));
  } LA2: ;
 }
 {
  NIM_BOOL LOC19;
  LOC19 = (NIM_BOOL)0;
  LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
  if (LOC19) goto LA20;
  LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
  LA20: ;
  if (!!(LOC19)) goto LA21;
  {
   /* climb the inheritance chain (object kind 17 with a base in son 0) */
   while (1) {
    NIM_BOOL LOC25;
    TY533289 LOC27;
    Ropeobj178006* LOC28;
    LOC25 = (NIM_BOOL)0;
    LOC25 = ((*t0).kind == ((Ttypekind292244) 17));
    if (!(LOC25)) goto LA26;
    LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
    LA26: ;
    if (!LOC25) goto LA24;
    memset((void*)LOC27, 0, sizeof(LOC27));
    LOC28 = (Ropeobj178006*)0;
    LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
    add_178482_2381377266(&r0, LOC28);
    t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
   } LA24: ;
  }
 }
 LA21: ;
 {
  NIM_BOOL LOC31;
  LOC31 = (NIM_BOOL)0;
  LOC31 = isobjlackingtypefield_533513_839829468(t0);
  if (!LOC31) goto LA32;
  globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412));
 }
 LA32: ;
 {
  /* wrap the check with a nil guard when a pointer value was seen */
  TY532811 LOC38;
  if (!!((nilcheck0 == NIM_NIL))) goto LA36;
  memset((void*)LOC38, 0, sizeof(LOC38));
  LOC38[0] = nilcheck0;
  LOC38[1] = genofhelper_555139_839829468(p0, dest0, r0);
  r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
 }
 goto LA34;
 LA36: ;
 {
  TY178507 LOC40;
  memset((void*)LOC40, 0, sizeof(LOC40));
  LOC40[0] = genofhelper_555139_839829468(p0, dest0, r0);
  r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
 }
 LA34: ;
 LOC41 = (Ttype292840*)0;
 LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1));
 putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* Machine-generated C (Nim compiler backend).
 * Thin wrapper: dispatches an `of` expression node n0 to genof_555201 using
 * son 1 as the operand and son 2's type as the target type. */
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
 genof_555201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): appears to emit the allocation code behind Nim's `new` --
 * confirm against compiler/ccgexprs.nim.
 * If no explicit size expression is given, a sizeof-style default (template
 * T..._419) is built from the ref'd type.  With native-GC storage, emits a
 * GC-ref pre-step (cycle-aware T..._420 vs plain T..._255), allocates via
 * T..._421 and assigns through linefmt T..._123; otherwise allocates via
 * T..._422 and uses genassignment.  Ends by emitting object init code. */
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816 a0, Ropeobj178006* sizeexpr_554745_839829468) {
 Ropeobj178006* sizeexpr0;
 Ttype292840* reftype0;
 Tloc292816 b0;
 TY535238 args0;
 Ttype292840* bt0;
 sizeexpr0 = sizeexpr_554745_839829468;
 reftype0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
 memset((void*)(&b0), 0, sizeof(b0));
 initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
 {
  TY178507 LOC5;
  Ttype292840* LOC6;
  /* `!sizeexpr0 == 0` parses as `(!sizeexpr0) == 0`, i.e. "sizeexpr0 is
   * non-null" -> skip; the block below runs only when no size was given. */
  if (!sizeexpr0 == 0) goto LA3;
  memset((void*)LOC5, 0, sizeof(LOC5));
  LOC6 = (Ttype292840*)0;
  LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
  LOC5[0] = gettypedesc_535671_839829468((*p0).module, LOC6);
  sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
 }
 LA3: ;
 memset((void*)args0, 0, sizeof(args0));
 args0[0] = gettypedesc_535671_839829468((*p0).module, reftype0);
 args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0);
 args0[2] = sizeexpr0;
 {
  NIM_BOOL LOC9;
  TY532811 LOC21;
  LOC9 = (NIM_BOOL)0;
  LOC9 = (a0.s == ((Tstorageloc292812) 3));
  if (!(LOC9)) goto LA10;
  LOC9 = usesnativegc_169177_2607990831();
  LA10: ;
  if (!LOC9) goto LA11;
  {
   NIM_BOOL LOC15;
   TY178507 LOC18;
   LOC15 = (NIM_BOOL)0;
   LOC15 = canformacycle_320123_3876443242(a0.t);
   if (!LOC15) goto LA16;
   memset((void*)LOC18, 0, sizeof(LOC18));
   LOC18[0] = rdloc_538188_839829468(a0);
   linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
  }
  goto LA13;
  LA16: ;
  {
   TY178507 LOC20;
   memset((void*)LOC20, 0, sizeof(LOC20));
   LOC20[0] = rdloc_538188_839829468(a0);
   linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
  }
  LA13: ;
  b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
  memset((void*)LOC21, 0, sizeof(LOC21));
  LOC21[0] = rdloc_538188_839829468(a0);
  LOC21[1] = rdloc_538188_839829468(b0);
  linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
 }
 goto LA7;
 LA11: ;
 {
  /* non-native-GC / non-GC-storage path */
  b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
  genassignment_539264_839829468(p0, a0, b0, 0);
 }
 LA7: ;
 bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
 genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): entry point for a `new(x)` call node, presumably with an
 * optional explicit size as son 2 -- confirm against compiler/ccgexprs.nim.
 * With 3 sons, evaluates son 2 as the size expression; otherwise delegates
 * to rawgennew with a nil size (default sizeof). */
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) {
 Tloc292816 a0;
 memset((void*)(&a0), 0, sizeof(a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 {
  NI LOC3;
  Tloc292816 se0;
  Ropeobj178006* LOC6;
  LOC3 = (NI)0;
  LOC3 = len_293081_850551059(e0);
  if (!(LOC3 == ((NI) 3))) goto LA4;
  memset((void*)(&se0), 0, sizeof(se0));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
  LOC6 = (Ropeobj178006*)0;
  LOC6 = rdloc_538188_839829468(se0);
  rawgennew_554741_839829468(p0, a0, LOC6);
 }
 goto LA1;
 LA4: ;
 {
  rawgennew_554741_839829468(p0, a0, NIM_NIL);
 }
 LA1: ;
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): appears to handle `new(x, finalizer)` -- confirm against
 * compiler/ccgexprs.nim.
 * Evaluates the target (son 1) and the finalizer (son 2), registers the
 * finalizer against the type info in file section 14 (template T..._423),
 * allocates via T..._424, assigns the result, and emits object init code. */
N_NIMCALL(void, gennewfinalize_555110_839829468)(Tcproc529021* p0, Tnode292802* e0) {
 Tloc292816 a0;
 Tloc292816 b0;
 Tloc292816 f0;
 Ttype292840* reftype0;
 Ttype292840* bt0;
 Ropeobj178006* ti0;
 TY532811 LOC1;
 TY535238 LOC2;
 Ttype292840* LOC3;
 Ttype292840* LOC4;
 Ttype292840* LOC5;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 memset((void*)(&f0), 0, sizeof(f0));
 reftype0 = (Ttype292840*)0;
 bt0 = (Ttype292840*)0;
 ti0 = (Ropeobj178006*)0;
 reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0));
 initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
 ti0 = gentypeinfo_535941_839829468((*p0).module, reftype0);
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = ti0;
 LOC1[1] = rdloc_538188_839829468(f0);
 addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2);
 memset((void*)LOC2, 0, sizeof(LOC2));
 LOC2[0] = gettypedesc_535671_839829468((*p0).module, reftype0);
 LOC2[1] = ti0;
 LOC3 = (Ttype292840*)0;
 LOC3 = lastson_295377_850551059(reftype0);
 LOC4 = (Ttype292840*)0;
 LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832));
 LOC2[2] = gettypedesc_535671_839829468((*p0).module, LOC4);
 b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3);
 genassignment_539264_839829468(p0, a0, b0, 0);
 LOC5 = (Ttype292840*)0;
 LOC5 = lastson_295377_850551059(reftype0);
 bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832));
 genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): shared helper for seq allocation (`newSeq`) -- confirm
 * against compiler/ccgexprs.nim.  Mirrors rawgennew's two paths: native-GC
 * storage emits a GC-ref pre-step (cycle-aware T..._420 vs T..._255),
 * allocates via T..._425 and assigns with linefmt T..._123; otherwise
 * allocates via T..._426 and uses genassignment. */
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816 dest0, Ropeobj178006* length0) {
 Ttype292840* seqtype0;
 TY535238 args0;
 Tloc292816 call0;
 seqtype0 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
 memset((void*)args0, 0, sizeof(args0));
 args0[0] = gettypedesc_535671_839829468((*p0).module, seqtype0);
 args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
 args0[2] = length0;
 memset((void*)(&call0), 0, sizeof(call0));
 initloc_532273_839829468((&call0), ((Tlockind292808) 6), dest0.t, ((Tstorageloc292812) 3));
 {
  NIM_BOOL LOC3;
  TY532811 LOC15;
  LOC3 = (NIM_BOOL)0;
  LOC3 = (dest0.s == ((Tstorageloc292812) 3));
  if (!(LOC3)) goto LA4;
  LOC3 = usesnativegc_169177_2607990831();
  LA4: ;
  if (!LOC3) goto LA5;
  {
   NIM_BOOL LOC9;
   TY178507 LOC12;
   LOC9 = (NIM_BOOL)0;
   LOC9 = canformacycle_320123_3876443242(dest0.t);
   if (!LOC9) goto LA10;
   memset((void*)LOC12, 0, sizeof(LOC12));
   LOC12[0] = rdloc_538188_839829468(dest0);
   linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
  }
  goto LA7;
  LA10: ;
  {
   TY178507 LOC14;
   memset((void*)LOC14, 0, sizeof(LOC14));
   LOC14[0] = rdloc_538188_839829468(dest0);
   linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
  }
  LA7: ;
  call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
  memset((void*)LOC15, 0, sizeof(LOC15));
  LOC15[0] = rdloc_538188_839829468(dest0);
  LOC15[1] = rdloc_538188_839829468(call0);
  linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
 }
 goto LA1;
 LA5: ;
 {
  call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
  genassignment_539264_839829468(p0, dest0, call0, 0);
 }
 LA1: ;
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably codegen for `newSeq(dest, len)` -- confirm.
 * Evaluates son 1 (dest) and son 2 (length), then delegates to
 * gennewseqaux with the rendered length expression. */
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) {
 Tloc292816 a0;
 Tloc292816 b0;
 Ropeobj178006* LOC1;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
 LOC1 = (Ropeobj178006*)0;
 LOC1 = rdloc_538188_839829468(b0);
 gennewseqaux_554795_839829468(p0, a0, LOC1);
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably codegen for `newSeqOfCap(cap)` -- confirm.
 * Evaluates son 1 (capacity), formats the allocation call via template
 * T..._427 with {typedesc, typeinfo, cap}, and stores it into d0. */
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 Ttype292840* seqtype0;
 Tloc292816 a0;
 TY535238 LOC1;
 Ropeobj178006* LOC2;
 seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440));
 memset((void*)(&a0), 0, sizeof(a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = gettypedesc_535671_839829468((*p0).module, seqtype0);
 LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
 LOC1[2] = rdloc_538188_839829468(a0);
 LOC2 = (Ropeobj178006*)0;
 LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3);
 putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): emits the C typedef for a Nim proc/closure type t0 into
 * file section 3 and returns its fresh temp name -- confirm against
 * compiler/ccgtypes.nim.  kind0 distinguishes closure flavors; for
 * non-imported types, a plain calling-convention function-pointer typedef
 * (template T..._64) is emitted unless t0 uses the closure calling
 * convention (value 8) and kind0 selects the env-carrying form, in which
 * case template T..._75 is used. */
N_NIMCALL(Ropeobj178006*, getclosuretype_535683_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535679 kind0) {
 Ropeobj178006* result0;
 Intset268030 check0;
 Ropeobj178006* rettype0;
 Ropeobj178006* desc0;
 result0 = (Ropeobj178006*)0;
 memset((void*)(&check0), 0, sizeof(check0));
 chckNil((void*)(&check0));
 memset((void*)(&check0), 0, sizeof(check0));
 initintset_268885_2627731572((&check0));
 result0 = gettempname_533596_839829468(m0);
 rettype0 = (Ropeobj178006*)0;
 desc0 = (Ropeobj178006*)0;
 genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535679) 0))), NIM_FALSE);
 {
  NIM_BOOL LOC3;
  LOC3 = (NIM_BOOL)0;
  LOC3 = isimportedtype_533449_839829468(t0);
  if (!!(LOC3)) goto LA4;
  {
   NIM_BOOL LOC8;
   TY535235 LOC12;
   LOC8 = (NIM_BOOL)0;
   LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8)));
   if (LOC8) goto LA9;
   LOC8 = !((kind0 == ((Tclosuretypekind535679) 2)));
   LA9: ;
   if (!LOC8) goto LA10;
   memset((void*)LOC12, 0, sizeof(LOC12));
   LOC12[0] = rope_178277_2381377266(Callingconvtostr_533585_839829468[((*t0).callconv)- 0]);
   LOC12[1] = rettype0;
   LOC12[2] = result0;
   LOC12[3] = desc0;
   addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
  }
  goto LA6;
  LA10: ;
  {
   TY535238 LOC14;
   memset((void*)LOC14, 0, sizeof(LOC14));
   LOC14[0] = result0;
   LOC14[1] = rettype0;
   LOC14[2] = desc0;
   addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
  }
  LA6: ;
 }
 LA4: ;
 return result0;
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): appears to implement Nim `cast` expressions -- confirm
 * against compiler/ccgexprs.nim.  Three cases:
 *  1. aggregate-ish target kinds (18/17/16/27/48/4) on an addressable
 *     operand: reinterpret through the address (template T..._429);
 *  2. target is a proc type (25) with closure calling convention (8):
 *     cast to the generated closure C type (template T..._430);
 *  3. everything else: plain C-style cast to the target typedesc
 *     (also template T..._430). */
N_NIMCALL(void, gensomecast_556480_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 Tloc292816 a0;
 Ttype292840* etyp0;
 memset((void*)(&a0), 0, sizeof(a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
 {
  NIM_BOOL LOC3;
  TY532811 LOC7;
  Ropeobj178006* LOC8;
  LOC3 = (NIM_BOOL)0;
  LOC3 = ((*etyp0).kind == ((Ttypekind292244) 18) || (*etyp0).kind == ((Ttypekind292244) 17) || (*etyp0).kind == ((Ttypekind292244) 16) || (*etyp0).kind == ((Ttypekind292244) 27) || (*etyp0).kind == ((Ttypekind292244) 48) || (*etyp0).kind == ((Ttypekind292244) 4));
  if (!(LOC3)) goto LA4;
  LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
  LA4: ;
  if (!LOC3) goto LA5;
  memset((void*)LOC7, 0, sizeof(LOC7));
  LOC7[0] = gettypedesc_535671_839829468((*p0).module, (*e0).typ);
  LOC7[1] = addrloc_538204_839829468(a0);
  LOC8 = (Ropeobj178006*)0;
  LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
  putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
 }
 goto LA1;
 LA5: ;
 {
  NIM_BOOL LOC10;
  TY532811 LOC14;
  Ropeobj178006* LOC15;
  LOC10 = (NIM_BOOL)0;
  LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25));
  if (!(LOC10)) goto LA11;
  LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8));
  LA11: ;
  if (!LOC10) goto LA12;
  memset((void*)LOC14, 0, sizeof(LOC14));
  LOC14[0] = getclosuretype_535683_839829468((*p0).module, etyp0, ((Tclosuretypekind535679) 1));
  LOC14[1] = rdcharloc_538227_839829468(a0);
  LOC15 = (Ropeobj178006*)0;
  LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
  putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
 }
 goto LA1;
 LA12: ;
 {
  TY532811 LOC17;
  Ropeobj178006* LOC18;
  memset((void*)LOC17, 0, sizeof(LOC17));
  LOC17[0] = gettypedesc_535671_839829468((*p0).module, (*e0).typ);
  LOC17[1] = rdcharloc_538227_839829468(a0);
  LOC18 = (Ropeobj178006*)0;
  LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
  putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
 }
 LA1: ;
}
/* Machine-generated C (Nim compiler backend).
 * Generic unary-expression helper: evaluates son 1 of e0, reads it via
 * rdcharloc (NOTE(review): presumably applies a char/NU8 cast where
 * needed -- confirm), substitutes it into frmt0 and stores into d0. */
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
 Tloc292816 a0;
 TY178507 LOC1;
 Ropeobj178006* LOC2;
 memset((void*)(&a0), 0, sizeof(a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = rdcharloc_538227_839829468(a0);
 LOC2 = (Ropeobj178006*)0;
 LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
 putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably codegen for Nim's `ord` magic -- applies the
 * fixed format string T..._301 through unaryexprchar. */
N_NIMCALL(void, genord_556474_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301));
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably codegen for `len`/`high` magics (op0 value 8
 * looks like the `high` variant since the paired templates differ only by
 * a -1) -- confirm against compiler/ccgexprs.nim.
 * Dispatches on the (skipped) operand type kind:
 *  - open-array-like (27/48): templates T..._431/_432;
 *  - cstring (29): uses strlen via usestringh + T..._433/_434;
 *  - string/seq (28/24): normal vs cmd-2/module-flag variants
 *    (T..._435/_436 vs T..._437/_438);
 *  - array/enum-ish (16/4): fold to the constant lastOrd/lengthOrd;
 *  - anything else: internal error T..._439. */
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
 Tnode292802* a0;
 Ttype292840* typ0;
 a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
 {
  /* unwrap a hidden-deref-style node (kind 64) */
  if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3;
  a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
 }
 LA3: ;
 typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864));
 switch ((*typ0).kind) {
 case ((Ttypekind292244) 27):
 case ((Ttypekind292244) 48):
 {
  {
   if (!(op0 == ((Tmagic292524) 8))) goto LA8;
   unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
  }
  goto LA6;
  LA8: ;
  {
   unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
  }
  LA6: ;
 }
 break;
 case ((Ttypekind292244) 29):
 {
  usestringh_532345_839829468((*p0).module);
  {
   if (!(op0 == ((Tmagic292524) 8))) goto LA14;
   unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
  }
  goto LA12;
  LA14: ;
  {
   unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
  }
  LA12: ;
 }
 break;
 case ((Ttypekind292244) 28):
 case ((Ttypekind292244) 24):
 {
  {
   NIM_BOOL LOC20;
   LOC20 = (NIM_BOOL)0;
   LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
   if (LOC20) goto LA21;
   LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
   LA21: ;
   if (!!(LOC20)) goto LA22;
   {
    if (!(op0 == ((Tmagic292524) 8))) goto LA26;
    unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
   }
   goto LA24;
   LA26: ;
   {
    unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
   }
   LA24: ;
  }
  goto LA18;
  LA22: ;
  {
   {
    if (!(op0 == ((Tmagic292524) 8))) goto LA32;
    unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
   }
   goto LA30;
   LA32: ;
   {
    unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
   }
   LA30: ;
  }
  LA18: ;
 }
 break;
 case ((Ttypekind292244) 16):
 case ((Ttypekind292244) 4):
 {
  /* compile-time-known length: emit the literal lastOrd/lengthOrd value */
  {
   NI64 LOC40;
   Ropeobj178006* LOC41;
   if (!(op0 == ((Tmagic292524) 8))) goto LA38;
   LOC40 = (NI64)0;
   LOC40 = lastord_320004_3876443242(typ0);
   LOC41 = (Ropeobj178006*)0;
   LOC41 = rope_178401_2381377266(LOC40);
   putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0));
  }
  goto LA36;
  LA38: ;
  {
   NI64 LOC43;
   Ropeobj178006* LOC44;
   LOC43 = (NI64)0;
   LOC43 = lengthord_320007_3876443242(typ0);
   LOC44 = (Ropeobj178006*)0;
   LOC44 = rope_178401_2381377266(LOC43);
   putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0));
  }
  LA36: ;
 }
 break;
 default:
 {
  internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
 }
 break;
 }
}
/* Machine-generated C (Nim compiler backend).
 * Generic unary-statement helper: d0 must be unset (internal error T..._442
 * otherwise); evaluates son 1 of e0 and emits it through frmt0 as a
 * statement line in proc section 2. */
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
 Tloc292816 a0;
 TY178507 LOC5;
 memset((void*)(&a0), 0, sizeof(a0));
 {
  if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
  internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
 }
 LA3: ;
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 memset((void*)LOC5, 0, sizeof(LOC5));
 LOC5[0] = rdloc_538188_839829468(a0);
 linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 1);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably `setLen` for strings -- emits the binary
 * statement template T..._445 and records GC usage. */
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445));
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): presumably `setLen` for seqs -- confirm against
 * compiler/ccgexprs.nim.  Chooses between two setLen templates
 * (T..._446 normal vs T..._447 for cmd 2 / module-flag builds) and emits
 * it with {seq, newlen, seq typedesc, element typedesc}. */
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 Tloc292816 a0;
 Tloc292816 b0;
 Ttype292840* t0;
 NimStringDesc* setlenpattern0;
 TY535235 LOC8;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
 t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
 {
  NIM_BOOL LOC3;
  LOC3 = (NIM_BOOL)0;
  LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
  if (LOC3) goto LA4;
  LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
  LA4: ;
  if (!!(LOC3)) goto LA5;
  setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
 }
 goto LA1;
 LA5: ;
 {
  setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
 }
 LA1: ;
 memset((void*)LOC8, 0, sizeof(LOC8));
 LOC8[0] = rdloc_538188_839829468(a0);
 LOC8[1] = rdloc_538188_839829468(b0);
 LOC8[2] = gettypedesc_535671_839829468((*p0).module, t0);
 LOC8[3] = gettypedesc_535671_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
 linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4);
 gcusage_554439_839829468(e0);
}
/* Machine-generated C (Nim compiler backend).
 * Renders a set-element loc for indexing into a bitset: reads a0 via
 * rdcharloc and, when the set's first ordinal is non-zero, rebases the
 * expression by subtracting firstOrd (template T..._448). */
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816 a0, Ttype292840* settype0) {
 Ropeobj178006* result0;
 result0 = (Ropeobj178006*)0;
 result0 = rdcharloc_538227_839829468(a0);
 {
  NI64 LOC3;
  TY532811 LOC6;
  NI64 LOC7;
  LOC3 = (NI64)0;
  LOC3 = firstord_320001_3876443242(settype0);
  if (!!((LOC3 == IL64(0)))) goto LA4;
  memset((void*)LOC6, 0, sizeof(LOC6));
  LOC6[0] = result0;
  LOC7 = (NI64)0;
  LOC7 = firstord_320001_3876443242(settype0);
  LOC6[1] = rope_178401_2381377266(LOC7);
  result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
 }
 LA4: ;
 return result0;
}
/* Machine-generated C (Nim compiler backend).
 * Binary statement helper used by set incl/excl: evaluates sons 1 and 2,
 * rebases the element via rdsetelemloc against the set's type, and emits
 * frmt0 as a statement in proc section 2. */
N_NIMCALL(void, binarystmtinexcl_555857_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
 Tloc292816 a0;
 Tloc292816 b0;
 TY532811 LOC1;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = rdloc_538188_839829468(a0);
 LOC1[1] = rdsetelemloc_555662_839829468(b0, a0.t);
 linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2);
}
/* Machine-generated C (Nim compiler backend).
 * Generic binary-expression helper: evaluates sons 1 and 2, reads both via
 * rdcharloc, substitutes them into frmt0 and stores the result into d0. */
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
 Tloc292816 a0;
 Tloc292816 b0;
 TY532811 LOC1;
 Ropeobj178006* LOC2;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
 initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = rdcharloc_538227_839829468(a0);
 LOC1[1] = rdcharloc_538227_839829468(b0);
 LOC2 = (Ropeobj178006*)0;
 LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
 putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): heuristic deciding whether a set literal (node kind 39)
 * used with `in` should be expanded into a chain of comparisons instead of
 * a bitset test -- confirm against compiler/ccgexprs.nim.
 * Returns false for small constant sets (size <= intSize and a const-ness
 * node flag), true when the element type is out of bitset range (kind 31
 * or kinds 33..35), otherwise true only for <= 8 elements. */
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) {
 NIM_BOOL result0;
 result0 = (NIM_BOOL)0;
 {
  if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3;
  internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
 }
 LA3: ;
 {
  NIM_BOOL LOC7;
  NI64 LOC8;
  LOC7 = (NIM_BOOL)0;
  LOC8 = (NI64)0;
  LOC8 = getsize_320135_3876443242((*s0).typ);
  LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050)));
  if (!(LOC7)) goto LA9;
  LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0);
  LA9: ;
  if (!LOC7) goto LA10;
  result0 = NIM_FALSE;
 }
 goto LA5;
 LA10: ;
 {
  Ttype292840* LOC13;
  LOC13 = (Ttype292840*)0;
  LOC13 = elemtype_320394_3876443242((*s0).typ);
  if (!((*LOC13).kind == ((Ttypekind292244) 31) || (*LOC13).kind >= ((Ttypekind292244) 33) && (*LOC13).kind <= ((Ttypekind292244) 35))) goto LA14;
  result0 = NIM_TRUE;
 }
 goto LA5;
 LA14: ;
 {
  NI LOC17;
  LOC17 = (NI)0;
  LOC17 = sonslen_295351_850551059(s0);
  result0 = (LOC17 <= ((NI) 8));
 }
 LA5: ;
 return result0;
}
/* Machine-generated C (Nim compiler backend).
 * Helper for set membership (`in`): formats frmt0 with the set loc a0 and
 * the rebased element loc b0 (via rdsetelemloc) and stores into d0. */
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) {
 TY532811 LOC1;
 Ropeobj178006* LOC2;
 memset((void*)LOC1, 0, sizeof(LOC1));
 LOC1[0] = rdloc_538188_839829468((*a0));
 LOC1[1] = rdsetelemloc_555662_839829468((*b0), (*a0).t);
 LOC2 = (Ropeobj178006*)0;
 LOC2 = HEX25_178905_2381377266(frmt0, LOC1, 2);
 putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* Machine-generated C (Nim compiler backend).
 * Dispatches a set-membership test on the set's byte size: sizes 1/2/4/8
 * use word-sized bit-test templates (T..._467..470); any other size falls
 * back to the generic big-set template T..._471. */
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) {
 Ttype292840* LOC1;
 NI64 LOC2;
 LOC1 = (Ttype292840*)0;
 LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
 LOC2 = (NI64)0;
 LOC2 = getsize_320135_3876443242(LOC1);
 switch (((NI) (LOC2))) {
 case ((NI) 1):
 {
  binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467));
 }
 break;
 case ((NI) 2):
 {
  binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468));
 }
 break;
 case ((NI) 4):
 {
  binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469));
 }
 break;
 case ((NI) 8):
 {
  binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470));
 }
 break;
 default:
 {
  binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471));
 }
 break;
 }
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): codegen for Nim's `in` operator -- confirm against
 * compiler/ccgexprs.nim.  When the set operand (son 1) is a set literal
 * (kind 39) that fewcmps approves, the test is expanded into a
 * parenthesized chain of comparisons: range elements (node kind 44) emit a
 * two-sided comparison (template T..._464), plain elements an equality
 * (T..._465), joined by T..._466 between items.  Otherwise both operands
 * are evaluated normally and geninexpraux picks the bitset template. */
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
 Tloc292816 a0;
 Tloc292816 b0;
 Tloc292816 x0;
 Tloc292816 y0;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 memset((void*)(&x0), 0, sizeof(x0));
 memset((void*)(&y0), 0, sizeof(y0));
 {
  NIM_BOOL LOC3;
  Tnode292802* ea0;
  NI length0;
  LOC3 = (NIM_BOOL)0;
  LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39));
  if (!(LOC3)) goto LA4;
  LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
  LA4: ;
  if (!LOC3) goto LA5;
  {
   /* strip a conversion-like wrapper (kinds 69/70) around the element */
   if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9;
   ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
  }
  goto LA7;
  LA9: ;
  {
   ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
  }
  LA7: ;
  initlocexpr_539283_839829468(p0, ea0, (&a0));
  initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0));
  b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118));
  length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
  {
   NI i_556061_839829468;
   NI HEX3Atmp_556412_839829468;
   NI res_556415_839829468;
   i_556061_839829468 = (NI)0;
   HEX3Atmp_556412_839829468 = (NI)0;
   HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1));
   res_556415_839829468 = ((NI) 0);
   {
    while (1) {
     if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14;
     i_556061_839829468 = res_556415_839829468;
     {
      TY535238 LOC19;
      if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17;
      initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
      initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
      memset((void*)LOC19, 0, sizeof(LOC19));
      LOC19[0] = rdcharloc_538227_839829468(a0);
      LOC19[1] = rdcharloc_538227_839829468(x0);
      LOC19[2] = rdcharloc_538227_839829468(y0);
      addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
     }
     goto LA15;
     LA17: ;
     {
      TY532811 LOC21;
      initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0));
      memset((void*)LOC21, 0, sizeof(LOC21));
      LOC21[0] = rdcharloc_538227_839829468(a0);
      LOC21[1] = rdcharloc_538227_839829468(x0);
      addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
     }
     LA15: ;
     {
      /* join with the separator between elements, not after the last one */
      if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
      add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
     }
     LA24: ;
     res_556415_839829468 += ((NI) 1);
    } LA14: ;
   }
  }
  add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
  putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0));
 }
 goto LA1;
 LA5: ;
 {
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
  initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
  geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0);
 }
 LA1: ;
}
/* Machine-generated C (Nim compiler backend).
 * NOTE(review): dispatcher for Nim's set-operation magics (incl/excl/card,
 * comparisons, union/intersection/difference, `in`) -- magic numbers below
 * inferred from the paired helpers, confirm against compiler/ccgexprs.nim.
 * Outer switch on the set's byte size: sizes 1/2/4/8 use word-sized bit
 * operations (incl/excl build their masks from a dynamically composed
 * "NU<bits>" type string); any other size uses the big-set templates,
 * including loop-based comparison/combination forms driven by the
 * lookupopr table and temp index/result locations. */
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
 Tloc292816 a0;
 Tloc292816 b0;
 Tloc292816 i0;
 Ttype292840* settype0;
 NI size0;
 NI64 LOC1;
 memset((void*)(&a0), 0, sizeof(a0));
 memset((void*)(&b0), 0, sizeof(b0));
 memset((void*)(&i0), 0, sizeof(i0));
 settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
 LOC1 = (NI64)0;
 LOC1 = getsize_320135_3876443242(settype0);
 size0 = ((NI) (LOC1));
 switch (size0) {
 case ((NI) 1):
 case ((NI) 2):
 case ((NI) 4):
 case ((NI) 8):
 {
  /* small set: fits in a single machine word */
  switch (op0) {
  case ((Tmagic292524) 39):
  {
   /* incl: ts0 = "NU" & bits, spliced twice into the mask template */
   NimStringDesc* ts0;
   NimStringDesc* LOC4;
   NimStringDesc* LOC5;
   NimStringDesc* LOC6;
   LOC4 = (NimStringDesc*)0;
   LOC5 = (NimStringDesc*)0;
   LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
   LOC4 = rawNewString(LOC5->Sup.len + 2);
   appendString(LOC4, ((NimStringDesc*) &T839829468_45));
   appendString(LOC4, LOC5);
   ts0 = LOC4;
   LOC6 = (NimStringDesc*)0;
   LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
   appendString(LOC6, ((NimStringDesc*) &T839829468_449));
   appendString(LOC6, ts0);
   appendString(LOC6, ((NimStringDesc*) &T839829468_450));
   appendString(LOC6, ts0);
   appendString(LOC6, ((NimStringDesc*) &T839829468_451));
   binarystmtinexcl_555857_839829468(p0, e0, d0, LOC6);
  }
  break;
  case ((Tmagic292524) 40):
  {
   /* excl: same shape, inverted mask */
   NimStringDesc* ts0;
   NimStringDesc* LOC8;
   NimStringDesc* LOC9;
   NimStringDesc* LOC10;
   LOC8 = (NimStringDesc*)0;
   LOC9 = (NimStringDesc*)0;
   LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
   LOC8 = rawNewString(LOC9->Sup.len + 2);
   appendString(LOC8, ((NimStringDesc*) &T839829468_45));
   appendString(LOC8, LOC9);
   ts0 = LOC8;
   LOC10 = (NimStringDesc*)0;
   LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
   appendString(LOC10, ((NimStringDesc*) &T839829468_452));
   appendString(LOC10, ts0);
   appendString(LOC10, ((NimStringDesc*) &T839829468_453));
   appendString(LOC10, ts0);
   appendString(LOC10, ((NimStringDesc*) &T839829468_454));
   binarystmtinexcl_555857_839829468(p0, e0, d0, LOC10);
  }
  break;
  case ((Tmagic292524) 41):
  {
   /* card: popcount template differs for <=4-byte vs 8-byte sets */
   {
    if (!(size0 <= ((NI) 4))) goto LA14;
    unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
   }
   goto LA12;
   LA14: ;
   {
    unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
   }
   LA12: ;
  }
  break;
  case ((Tmagic292524) 133):
  {
   binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
  }
  break;
  case ((Tmagic292524) 132):
  {
   binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
  }
  break;
  case ((Tmagic292524) 131):
  {
   binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
  }
  break;
  case ((Tmagic292524) 134):
  {
   binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
  }
  break;
  case ((Tmagic292524) 135):
  {
   binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
  }
  break;
  case ((Tmagic292524) 136):
  {
   binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
  }
  break;
  case ((Tmagic292524) 137):
  {
   binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
  }
  break;
  case ((Tmagic292524) 148):
  {
   geninop_556009_839829468(p0, e0, d0);
  }
  break;
  default:
  {
   internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
  }
  break;
  }
 }
 break;
 default:
 {
  /* big set: array-of-words representation */
  switch (op0) {
  case ((Tmagic292524) 39):
  {
   binarystmtinexcl_555857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
  }
  break;
  case ((Tmagic292524) 40):
  {
   binarystmtinexcl_555857_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
  }
  break;
  case ((Tmagic292524) 41):
  {
   /* card over a byte buffer: template takes the literal size */
   NimStringDesc* LOC30;
   NimStringDesc* LOC31;
   LOC30 = (NimStringDesc*)0;
   LOC31 = (NimStringDesc*)0;
   LOC31 = nimIntToStr(size0);
   LOC30 = rawNewString(LOC31->Sup.len + 14);
   appendString(LOC30, ((NimStringDesc*) &T839829468_475));
   appendString(LOC30, LOC31);
   appendChar(LOC30, 41);
   unaryexprchar_551222_839829468(p0, e0, d0, LOC30);
  }
  break;
  case ((Tmagic292524) 133):
  case ((Tmagic292524) 132):
  {
   /* subset-style comparisons: loop template from lookupopr, with a
    * fresh int index temp and a bool result temp when d0 is unset */
   Ttype292840* LOC33;
   TY536475 LOC39;
   LOC33 = (Ttype292840*)0;
   LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31));
   gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE);
   initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
   initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
   {
    Ttype292840* LOC38;
    if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
    LOC38 = (Ttype292840*)0;
    LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1));
    gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE);
   }
   LA36: ;
   memset((void*)LOC39, 0, sizeof(LOC39));
   LOC39[0] = rdloc_538188_839829468(i0);
   LOC39[1] = rope_178401_2381377266(((NI64) (size0)));
   LOC39[2] = rdloc_538188_839829468((*d0));
   LOC39[3] = rdloc_538188_839829468(a0);
   LOC39[4] = rdloc_538188_839829468(b0);
   linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5);
  }
  break;
  case ((Tmagic292524) 131):
  {
   /* equality: memcmp over the set's byte size */
   NimStringDesc* LOC41;
   NimStringDesc* LOC42;
   usestringh_532345_839829468((*p0).module);
   LOC41 = (NimStringDesc*)0;
   LOC42 = (NimStringDesc*)0;
   LOC42 = nimIntToStr(size0);
   LOC41 = rawNewString(LOC42->Sup.len + 21);
   appendString(LOC41, ((NimStringDesc*) &T839829468_481));
   appendString(LOC41, LOC42);
   appendString(LOC41, ((NimStringDesc*) &T839829468_482));
   binaryexprchar_550809_839829468(p0, e0, d0, LOC41);
  }
  break;
  case ((Tmagic292524) 134):
  case ((Tmagic292524) 135):
  case ((Tmagic292524) 136):
  case ((Tmagic292524) 137):
  {
   /* elementwise combine: loop template T..._483 with the per-op C
    * operator looked up from lookupopr */
   Ttype292840* LOC44;
   TY536847 LOC49;
   LOC44 = (Ttype292840*)0;
   LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31));
   gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE);
   initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
   initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
   {
    if (!((*d0).k == ((Tlockind292808) 0))) goto LA47;
    gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE);
   }
   LA47: ;
   memset((void*)LOC49, 0, sizeof(LOC49));
   LOC49[0] = rdloc_538188_839829468(i0);
   LOC49[1] = rope_178401_2381377266(((NI64) (size0)));
   LOC49[2] = rdloc_538188_839829468((*d0));
   LOC49[3] = rdloc_538188_839829468(a0);
   LOC49[4] = rdloc_538188_839829468(b0);
   LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]);
   linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
  }
  break;
  case ((Tmagic292524) 148):
  {
   geninop_556009_839829468(p0, e0, d0);
  }
  break;
  default:
  {
   internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
  }
  break;
  }
 }
 break;
 }
}
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
TY178507 LOC1;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), LOC1, 1);
return result0;
}
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
Tnode292802* q0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_328882_3876443242(n0);
{
Tmagic292524 LOC3;
Tloc292816 b0;
Tloc292816 c0;
Tnode292802* LOC6;
Tnode292802* LOC7;
Tnode292802* LOC8;
NimStringDesc* fmt0;
Ttype292840* LOC9;
TY535238 LOC25;
LOC3 = (Tmagic292524)0;
LOC3 = getmagic_318502_2616423590(q0);
if (!(LOC3 == ((Tmagic292524) 139))) goto LA4;
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1));
initlocexpr_539283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2));
initlocexpr_539283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode292802*)0;
LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3));
initlocexpr_539283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016));
switch ((*LOC9).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC14;
Ttype292840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype292840*)0;
LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_196113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468(a0);
LOC25[1] = rdloc_538188_839829468(b0);
LOC25[2] = rdloc_538188_839829468(c0);
result0 = HEX25_178905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
Ttype292840* LOC27;
initlocexpr_539283_839829468(p0, n0, (&a0));
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468(a0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC33;
Ttype292840* LOC34;
NIM_BOOL LOC36;
TY532811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype292840*)0;
LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_538188_839829468(a0);
LOC40[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY532811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_538188_839829468(a0);
LOC42[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468(a0);
LOC45 = (NI64)0;
LOC45 = lengthord_320007_3876443242(a0.t);
LOC44[1] = rope_178401_2381377266(LOC45);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
{
Ttype292840* LOC47;
LOC47 = (Ttype292840*)0;
LOC47 = lastson_295377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468(a0);
LOC49[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC51;
Ttype292840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(a0);
LOC52 = (Ttype292840*)0;
LOC52 = lastson_295377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_320007_3876443242(LOC52);
LOC51[1] = rope_178401_2381377266(LOC53);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_196113_155036129(LOC55);
}
break;
}
}
break;
default:
{
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_196113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468);
}
goto LA1;
LA3: ;
{
Ttype292840* LOC6;
Tnode292802* n0;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864));
if (!((*LOC6).kind == ((Ttypekind292244) 27) || (*LOC6).kind == ((Ttypekind292244) 48))) goto LA7;
{
if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11;
n0 = n_539790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_539665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_533609_839829468(param0);
if (!LOC15) goto LA16;
initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0));
result0 = addrloc_538204_839829468(a0);
}
goto LA1;
LA16: ;
{
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode292802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*callee0).kind == ((Tnodekind292020) 3));
if (!(LOC30)) goto LA31;
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_538204_839829468(a0);
}
goto LA27;
LA33: ;
{
result0 = rdloc_538188_839829468(a0);
}
LA27: ;
}
goto LA1;
LA25: ;
{
initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0));
result0 = rdloc_538188_839829468(a0);
}
LA1: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n0).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n0);
}
goto LA1;
LA3: ;
{
initlocexprsingleuse_539289_839829468(p0, n0, (&a0));
result0 = rdloc_538188_839829468(a0);
}
LA1: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getclosuretype_535683_839829468((*p0).module, t0, ((Tclosuretypekind535679) 0));
return result0;
}
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!!((le0 == NIM_NIL))) goto LA3;
{
NI i_539364_839829468;
NI HEX3Atmp_539376_839829468;
NI LOC6;
NI res_539379_839829468;
i_539364_839829468 = (NI)0;
HEX3Atmp_539376_839829468 = (NI)0;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri0);
HEX3Atmp_539376_839829468 = (LOC6 - 1);
res_539379_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* r0;
if (!(res_539379_839829468 <= HEX3Atmp_539376_839829468)) goto LA8;
i_539364_839829468 = res_539379_839829468;
r0 = HEX5BHEX5D_293238_850551059(ri0, i_539364_839829468);
{
Tanalysisresult473003 LOC11;
LOC11 = (Tanalysisresult473003)0;
LOC11 = ispartof_473340_788060399(le0, r0);
if (!!((LOC11 == ((Tanalysisresult473003) 0)))) goto LA12;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA12: ;
res_539379_839829468 += ((NI) 1);
} LA8: ;
}
}
}
LA3: ;
}BeforeRet: ;
return result0;
}
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = ((*(*call0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC1)) goto LA2;
LOC1 = (((*(*(*call0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) {
NIM_BOOL containsgcref0;
Ttype292840* typ0;
{ containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_533476_839829468(typ0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
Tloc292816 nilloc0;
if (!containsgcref0) goto LA13;
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2));
nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_538311_839829468(p0, (*loc0), nilloc0, 8);
}
goto LA11;
LA13: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
TY178507 LOC22;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_538204_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
TY532811 LOC27;
if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_538204_839829468((*loc0));
LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
goto LA23;
LA25: ;
{
TY532811 LOC29;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_538204_839829468((*loc0));
LOC29[1] = rdloc_538188_839829468((*loc0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (*loc0), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(r0 == NIM_NIL)) goto LA3;
result0 = r0;
}
goto LA1;
LA3: ;
{
TY533289 LOC6;
Ropeobj178006* LOC7;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC6, 0);
result0 = HEX26_178418_2381377266(r0, LOC7);
}
LA1: ;
return result0;
}
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
Ttype292840* typ0;
NI length0;
Ropeobj178006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540613_839829468;
NI HEX3Atmp_541214_839829468;
NI res_541217_839829468;
i_540613_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1));
res_541217_839829468 = ((NI) 1);
{
while (1) {
if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3;
i_540613_839829468 = res_541217_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540613_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]);
add_178482_2381377266(&pl0, LOC28);
}
LA4: ;
res_541217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_540459_839829468(p0, typ0);
{
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
{
NI LOC45;
TY533289 LOC48;
Ropeobj178006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_178482_2381377266(&pl0, LOC49);
}
LA46: ;
{
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj178006* LOC67;
NimStringDesc* LOC68;
TY535235 LOC69;
LOC52 = (NIM_BOOL)0;
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA59;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_539383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
resetloc_538350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
LOC67 = (Ropeobj178006*)0;
LOC67 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_540464_839829468(pl0);
LOC69[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
Tloc292816 tmp0;
Ropeobj178006* LOC71;
NimStringDesc* LOC72;
TY535235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj178006*)0;
LOC71 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_540464_839829468(pl0);
LOC73[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 4);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
Tloc292816 list0;
TY535235 LOC79;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA77;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_540464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4);
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
NimStringDesc* LOC81;
TY535235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_540464_839829468(pl0);
LOC82[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
Tnode292802* paramtype0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!(i0 < LOC3)) goto LA4;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!LOC8) goto LA9;
result0 = NIM_NIL;
}
goto LA6;
LA9: ;
{
NIM_BOOL LOC12;
Tnode292802* LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23));
if (!(LOC12)) goto LA13;
LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64));
LA13: ;
if (!LOC12) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC16);
}
goto LA6;
LA14: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA6: ;
}
goto LA1;
LA4: ;
{
{
if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) goto LA21;
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
result0 = NIM_NIL;
}
goto LA19;
LA21: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA19: ;
}
LA1: ;
return result0;
}
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) {
Tnode292802* result0;
Tnode292802* n0;
NIM_BOOL isaddr0;
{ result0 = (Tnode292802*)0;
n0 = node0;
isaddr0 = NIM_FALSE;
switch ((*n0).kind) {
case ((Tnodekind292020) 63):
case ((Tnodekind292020) 64):
{
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
isaddr0 = NIM_TRUE;
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
break;
default:
{
result0 = n0;
goto BeforeRet;
}
break;
}
{
if (!((*n0).kind == ((Tnodekind292020) 66))) goto LA6;
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
LA6: ;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isaddr0;
if (!(LOC10)) goto LA11;
LOC10 = ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65));
LA11: ;
if (!LOC10) goto LA12;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA12: ;
{
if (!((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64))) goto LA15;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA15: ;
{
result0 = node0;
}
LA8: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
Tnode292802* ri0;
Ttype292840* t0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_503);
internalerror_196113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0);
{
while (1) {
if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode292802* x0;
if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11;
{
if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15;
x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20;
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode292802* LOC25;
Tnode292802* LOC28;
LOC23 = (NIM_BOOL)0;
LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21));
LA24: ;
if (!LOC23) goto LA26;
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC28);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31;
{
Tnode292802* LOC37;
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35;
LOC37 = (Tnode292802*)0;
LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC37);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
ri0 = skipaddrderef_541433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) {
Ropeobj178006* result0;
NI i0;
NI j0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
case 64:
{
{
NI LOC6;
Ropeobj178006* LOC9;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri_541702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC9);
{
NI k_541728_839829468;
NI HEX3Atmp_541904_839829468;
NI HEX3Atmp_541905_839829468;
NI LOC11;
NI res_541908_839829468;
k_541728_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)0;
HEX3Atmp_541905_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_293081_850551059(ri_541702_839829468);
HEX3Atmp_541905_839829468 = (LOC11 - 1);
res_541908_839829468 = HEX3Atmp_541904_839829468;
{
while (1) {
TY533289 LOC14;
Ropeobj178006* LOC15;
Ropeobj178006* LOC16;
if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13;
k_541728_839829468 = res_541908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_178482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj178006*)0;
LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541728_839829468, typ_541704_839829468);
add_178482_2381377266(&result0, LOC16);
res_541908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
case 35:
{
{
Tnode292802* ri0;
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0);
{
Ttype292840* typ0;
TY533289 LOC31;
Ropeobj178006* LOC32;
TY533289 LOC46;
Ropeobj178006* LOC47;
if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj178006* LOC30;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_178482_2381377266(&result0, LOC30);
}
LA28: ;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_178482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj178006* LOC38;
LOC35 = (NI)0;
LOC35 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj178006*)0;
LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0);
add_178482_2381377266(&result0, LOC38);
}
LA36: ;
{
NI k_541793_839829468;
NI HEX3Atmp_541915_839829468;
NI HEX3Atmp_541916_839829468;
NI LOC40;
NI res_541919_839829468;
k_541793_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)0;
HEX3Atmp_541916_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_293081_850551059(ri0);
HEX3Atmp_541916_839829468 = (LOC40 - 1);
res_541919_839829468 = HEX3Atmp_541915_839829468;
{
while (1) {
TY533289 LOC43;
Ropeobj178006* LOC44;
Ropeobj178006* LOC45;
if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42;
k_541793_839829468 = res_541919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj178006*)0;
LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_178482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj178006*)0;
LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0);
add_178482_2381377266(&result0, LOC45);
res_541919_839829468 += ((NI) 1);
} LA42: ;
}
}
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj178006*)0;
LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_178482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
Ropeobj178006* LOC52;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj178006*)0;
LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
Tnode292802* arg0;
Ropeobj178006* LOC58;
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57;
arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj178006*)0;
LOC58 = genargnoparam_539938_839829468(p0, arg0);
add_178482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
Ropeobj178006* LOC60;
LOC60 = (Ropeobj178006*)0;
LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC60);
}
LA18: ;
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
case 39:
{
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype292840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0);
{
TY533289 LOC71;
Ropeobj178006* LOC72;
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_178482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj178006* LOC74;
LOC74 = (Ropeobj178006*)0;
LOC74 = gettypedesc_535671_839829468((*p0).module, t0);
add_178482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
default:
{
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Machine-generated C from the Nim compiler's C backend (identifiers carry
 * symbol-id/module-hash suffixes; do not hand-edit the logic).
 * fixupcall: finishes emitting a call expression.  Builds the rope
 * pl0 = callee0 & fmt(T839829468_118) & params0 (presumably "callee(params"),
 * then closes the call according to the callee's return type:
 *  - hidden-out-parameter result: appends &dest as a final argument and
 *    emits a statement;
 *  - normal result: emits "dest = callee(params)" (or stores the call rope
 *    directly in d0 for deferred emission when flags permit);
 *  - no return type: emits a bare call statement.
 * NOTE(review): the meanings of the T839829468_* string constants are
 * inferred from usage; they are defined elsewhere in this file. */
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) {
Ropeobj178006* pl0;
TY533289 LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
Ttype292840* typ0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX26_178418_2381377266(callee0, LOC2);
pl0 = HEX26_178418_2381377266(LOC3, params0);
/* proc type of the callee expression (sons[0] of the call node),
 * with wrapper type kinds skipped */
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* sons[0] of a proc type is its return type; NIM_NIL presumably means void */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
/* true when the result cannot be returned by value in C and must be
 * passed back through a hidden pointer parameter */
LOC10 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
{
TY533289 LOC17;
Ropeobj178006* LOC18;
/* non-empty parameter list: append a separator before the hidden arg */
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_178482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC21 = (NIM_BOOL)0;
/* safe to write straight into d0 when its loc kind is in a small set,
 * or when the left side does not also appear on the right side */
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
{
/* destination loc not yet initialized: allocate a temp for it */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA28;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_539383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_538350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
/* pass the destination's address as the hidden result argument */
LOC36 = (Ropeobj178006*)0;
LOC36 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_178482_2381377266(&pl0, LOC38);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA19;
LA24: ;
{
/* otherwise: call into a fresh temporary, then assign the temp to d0 */
Tloc292816 tmp0;
Ropeobj178006* LOC40;
TY533289 LOC41;
Ropeobj178006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj178006*)0;
LOC40 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_178482_2381377266(&pl0, LOC42);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA19: ;
}
goto LA8;
LA11: ;
{
/* return type is usable as a plain C return value: close the call */
TY533289 LOC44;
Ropeobj178006* LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
/* NOTE(review): checks a global command mode or a module flag, plus a
 * loc flag on d0 -- when all hold, the call rope is stored directly
 * into d0 (deferred emission) instead of being emitted here */
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
/* wrap the call rope in an expression loc and assign it into d0 */
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA57;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
{
/* void call: append the closing ");" (T839829468_505) and emit it */
TY533289 LOC60;
Ropeobj178006* LOC61;
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj178006*)0;
LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_178482_2381377266(&pl0, LOC61);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA4: ;
}
/* Machine-generated C (Nim compiler C backend).
 * geninfixcall: emits a call whose callee was imported with a C++/infix
 * pattern (presumably the importcpp pragma).  The pattern string pat0 is
 * taken from the callee symbol's loc rope; it is an internal error for it
 * to be missing.  If the pattern contains a special marker (T839829468_500),
 * the whole call is rendered through genpatterncall; otherwise a
 * "thisArg.op(args...)"-style rope is assembled and handed to fixupcall.
 * NOTE(review): semantics inferred from usage of the mangled helpers. */
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ttype292840* typ_541940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
/* evaluate the callee expression itself into op0 */
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_541940_839829468 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
/* pattern string stored on the callee symbol's loc */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
/* internal error when the callee has no pattern string */
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_498);
internalerror_196113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj178006* pl0;
Ttype292840* typ0;
LOC8 = (NIM_BOOL)0;
/* pattern contains the special marker: full pattern-driven rendering */
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* non-void result: either store the rope directly into d0 (deferred,
 * same flag logic as in fixupcall) or assign through an expression loc */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA26;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA15: ;
}
goto LA11;
LA13: ;
{
/* void result: terminate the statement (T839829468_497) and emit it */
TY533289 LOC29;
Ropeobj178006* LOC30;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj178006*)0;
LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_178482_2381377266(&pl0, LOC30);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
{
/* no marker: build "thisArg" + op + remaining args, then fixupcall */
Ropeobj178006* pl0;
Ropeobj178006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj178006* LOC37;
LOC34 = (NI)0;
LOC34 = len_293081_850551059(ri0);
/* first actual argument (index 1) becomes the "this" receiver */
if (!(((NI) 1) < LOC34)) goto LA35;
LOC37 = (Ropeobj178006*)0;
LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468);
add_178482_2381377266(&pl0, LOC37);
}
LA35: ;
add_178482_2381377266(&pl0, op0.r);
params0 = (Ropeobj178006*)0;
{
/* remaining arguments (index 2 .. length-1), comma-separated */
NI i_542425_839829468;
NI HEX3Atmp_542609_839829468;
NI res_542612_839829468;
i_542425_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1));
res_542612_839829468 = ((NI) 2);
{
while (1) {
Ropeobj178006* LOC47;
if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40;
i_542425_839829468 = res_542612_839829468;
{
TY533289 LOC45;
Ropeobj178006* LOC46;
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
add_178482_2381377266(&params0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj178006*)0;
LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468);
add_178482_2381377266(&params0, LOC47);
res_542612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Machine-generated C (Nim compiler C backend).
 * gennamedparamcall: emits a named-parameter call -- presumably an
 * Objective-C style "[receiver selector: arg name: arg ...]" message send
 * (pl0 starts with T839829468_506, likely "[").  The pattern string on the
 * callee symbol decides which of two receiver/selector layouts is used;
 * the remaining formals are appended as " name: arg" pairs.  Result
 * handling mirrors fixupcall: hidden out-parameter, direct value, or void.
 * NOTE(review): string-constant meanings inferred from usage. */
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
TY533289 LOC1;
Ttype292840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
/* internal error when the callee has no pattern string */
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_507);
internalerror_196113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* pattern contains a space (char 32): "op firstArg" layout */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_178482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
{
/* no space in pattern: "firstArg op secondArg" layout */
{
Ropeobj178006* LOC24;
TY533289 LOC25;
Ropeobj178006* LOC26;
if (!(((NI) 1) < length0)) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj178006*)0;
LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_178482_2381377266(&pl0, LOC26);
}
LA22: ;
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC31;
Ropeobj178006* LOC32;
Ropeobj178006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_178482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj178006*)0;
LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
{
/* remaining arguments (start0 .. length-1): " paramName: arg" each */
NI i_543051_839829468;
NI HEX3Atmp_543617_839829468;
NI res_543620_839829468;
i_543051_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1));
res_543620_839829468 = start0;
{
while (1) {
Tsym292834* param0;
TY533289 LOC42;
Ropeobj178006* LOC43;
TY533289 LOC44;
Ropeobj178006* LOC45;
Ropeobj178006* LOC46;
if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36;
i_543051_839829468 = res_543620_839829468;
{
NI LOC39;
LOC39 = (NI)0;
/* more actual args than formal params is an internal error */
LOC39 = sonslen_295327_850551059(typ0);
if (!(LOC39 <= i_543051_839829468)) goto LA40;
internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj178006*)0;
LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_178482_2381377266(&pl0, LOC43);
add_178487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj178006*)0;
LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0);
add_178482_2381377266(&pl0, LOC46);
res_543620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* result handling -- same three-way split as fixupcall */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_533548_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
{
NI LOC58;
TY533289 LOC61;
Ropeobj178006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj178006*)0;
LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_178482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY533289 LOC71;
Ropeobj178006* LOC72;
Ropeobj178006* LOC73;
TY533289 LOC74;
Ropeobj178006* LOC75;
/* destination is directly addressable: pass &dest as hidden result */
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA69;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_178482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj178006*)0;
LOC73 = addrloc_538204_839829468((*d0));
add_178482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj178006*)0;
LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_178482_2381377266(&pl0, LOC75);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA63;
LA65: ;
{
/* call into a temp, then assign the temp to d0 */
Tloc292816 tmp0;
Ropeobj178006* LOC77;
TY533289 LOC78;
Ropeobj178006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj178006*)0;
LOC77 = addrloc_538204_839829468(tmp0);
add_178482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj178006*)0;
LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_178482_2381377266(&pl0, LOC79);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA63: ;
}
goto LA51;
LA54: ;
{
/* value result: close the message send and assign the expression */
TY533289 LOC81;
Ropeobj178006* LOC82;
Tloc292816 list0;
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj178006*)0;
LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_178482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA85;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (*d0), list0, 0);
}
LA51: ;
}
goto LA47;
LA49: ;
{
/* void call: close it (T839829468_510) and emit the statement */
TY533289 LOC88;
Ropeobj178006* LOC89;
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj178006*)0;
LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_178482_2381377266(&pl0, LOC89);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA47: ;
}
/* Machine-generated C (Nim compiler C backend).
 * genprefixcall: emits an ordinary prefix call "callee(arg1, ..., argN)".
 * Evaluates the callee into op0, walks the actual arguments (indices
 * 1 .. length-1); arguments whose formal parameter exists and whose type
 * is compile-time-only are skipped entirely, arguments beyond the formal
 * list are emitted without a matching formal.  fixupcall then closes the
 * call and handles the result. */
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* params0;
Ttype292840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
NI i_540213_839829468;
NI HEX3Atmp_540445_839829468;
NI res_540448_839829468;
i_540213_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1));
res_540448_839829468 = ((NI) 1);
{
while (1) {
if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3;
i_540213_839829468 = res_540448_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
/* argument has a matching formal parameter */
if (!(i_540213_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
/* skip compile-time-only parameters (they have no runtime value) */
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
/* comma-separate from any previously emitted argument */
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&params0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&params0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* extra argument past the formal list (e.g. varargs): no formal sym */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&params0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]);
add_178482_2381377266(&params0, LOC28);
}
LA4: ;
res_540448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
/* Appends the module's injected statement rope (module.injectstmt) to
 * section 2 of the current procedure's code sections.
 * NOTE(review): section index 2 presumably selects the statement/body
 * section of the generated proc -- confirm against Tcprocsection529011. */
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) {
Ropeobj178006** section = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(section, (*(*p0).module).injectstmt);
}
/* Dispatches code generation for a call node `e0` to the appropriate
 * specialized generator, based on the callee:
 *  - calling convention 8 (presumably closure) -> genclosurecall;
 *  - a symbol callee with symbol flag 27 set    -> geninfixcall;
 *  - a symbol callee with symbol flag 28 set    -> gennamedparamcall;
 *  - anything else                              -> genprefixcall.
 * Afterwards runs poststmtactions to append any injected statement code.
 * NOTE(review): flag-bit meanings (27/28) inferred from dispatch targets;
 * confirm against the Tsymflag292184 enum. */
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tnode292802* calleeNode = (*e0).kindU.S6.sons->data[((NI) 0)];
Ttype292840* calleeType = skiptypes_296099_850551059((*calleeNode).typ, 2048);
/* kind 3 presumably means "symbol node"; checked before touching .sym */
NIM_BOOL calleeIsSym = ((*calleeNode).kind == ((Tnodekind292020) 3));
if ((*calleeType).callconv == ((Tcallingconvention292002) 8)) {
genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0);
} else if (calleeIsSym && (((*(*calleeNode).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) {
geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0);
} else if (calleeIsSym && (((*(*calleeNode).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0)) {
gennamedparamcall_542616_839829468(p0, e0, d0);
} else {
genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0);
}
poststmtactions_532942_839829468(p0);
}
/* Emits a "reset" call for the expression in sons[1] of `n0`: evaluates it
 * into a loc, then emits format T839829468_496 with (&loc, typeinfo) as
 * the two arguments into section 2 of the current proc.
 * NOTE(review): presumably corresponds to the `reset` magic; the skipped
 * type-kind mask IL64(211106242013440) is inferred, not documented here. */
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 target;
TY532811 fmtArgs;
Ttype292840* skipped;
memset((void*)(&target), 0, sizeof(target));
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&target));
fmtArgs[0] = addrloc_538204_839829468(target);
skipped = skiptypes_296099_850551059(target.t, IL64(211106242013440));
fmtArgs[1] = gentypeinfo_535941_839829468((*p0).module, skipped);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), fmtArgs, 2);
}
/* Machine-generated C (Nim compiler C backend).
 * genecho: emits code for the `echo` magic.  Requires the argument node to
 * be of kind 41 (presumably nkBracket, the varargs container), includes a
 * header file (T839829468_513, presumably <stdio.h>), builds a "%s"-per-
 * argument format string plus the platform newline (tnl_...), then emits a
 * printf-style statement (T839829468_516) followed by a flush statement
 * (T839829468_518).  NOTE(review): constant meanings inferred from usage. */
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NIM_BOOL LOC6;
Ropeobj178006* args0;
Tloc292816 a0;
TY532811 LOC18;
NimStringDesc* LOC19;
NI LOC20;
NimStringDesc* LOC21;
TY533289 LOC22;
{
NimStringDesc* LOC5;
/* internal error unless the argument node is the expected container kind */
if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_512);
internalerror_196113_155036129(LOC5);
}
LA3: ;
LOC6 = (NIM_BOOL)0;
/* ensure the needed C header is in the module's header-file set;
 * the returned "was already present" bool is deliberately unused */
LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
args0 = NIM_NIL;
memset((void*)(&a0), 0, sizeof(a0));
{
/* build the comma-separated argument rope: a literal (T839829468_514,
 * presumably "nil") for nil nodes (kind 23), otherwise the evaluated loc */
NI i_554404_839829468;
NI HEX3Atmp_554431_839829468;
NI LOC8;
NI res_554434_839829468;
i_554404_839829468 = (NI)0;
HEX3Atmp_554431_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1));
res_554434_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10;
i_554404_839829468 = res_554434_839829468;
{
Tnode292802* LOC13;
LOC13 = (Tnode292802*)0;
LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]);
if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14;
add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
}
goto LA11;
LA14: ;
{
TY178507 LOC17;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0));
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468(a0);
addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
}
LA11: ;
res_554434_839829468 += ((NI) 1);
} LA10: ;
}
}
/* format string: T839829468_517 repeated once per argument, then the
 * target newline string; wrapped as a C string literal */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (NimStringDesc*)0;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(n0);
LOC21 = (NimStringDesc*)0;
LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC19, LOC21);
appendString(LOC19, tnl_176644_4151366050);
LOC18[0] = makecstring_191638_155036129(LOC19);
LOC18[1] = args0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
/* follow-up statement with no arguments -- presumably fflush(stdout) */
memset((void*)LOC22, 0, sizeof(LOC22));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
/* Machine-generated C (Nim compiler C backend).
 * genseqconstr: emits code for a seq constructor `@[...]`-style node `t0`.
 * Allocates a temp for the destination if none is set, emits a newSeq-like
 * allocation of sonslen(t0) elements (gennewseqaux), then generates each
 * element expression directly into the corresponding `d->data[i]` slot
 * (format T839829468_187, presumably "$1->data[$2]").  Ends with a GC
 * usage note for the node. */
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 arr0;
NI LOC5;
Ropeobj178006* LOC6;
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* destination loc not yet initialized: allocate a temp of the seq type */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA3: ;
LOC5 = (NI)0;
LOC5 = sonslen_295351_850551059(t0);
LOC6 = (Ropeobj178006*)0;
LOC6 = intliteral_539270_839829468(((NI64) (LOC5)));
gennewseqaux_554795_839829468(p0, (*d0), LOC6);
{
/* fill each element slot in order */
NI i_555031_839829468;
NI HEX3Atmp_555039_839829468;
NI LOC8;
NI res_555042_839829468;
i_555031_839829468 = (NI)0;
HEX3Atmp_555039_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = sonslen_295351_850551059(t0);
HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1));
res_555042_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC11;
Ttype292840* LOC12;
TY532811 LOC13;
if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10;
i_555031_839829468 = res_555042_839829468;
/* element loc: kind 6 (presumably locExpr), typed as the seq's
 * element type, storage 3 (presumably heap) */
LOC11 = (Ttype292840*)0;
LOC11 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC12 = (Ttype292840*)0;
LOC12 = elemtype_320394_3876443242(LOC11);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3));
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468((*d0));
LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
arr0.s = ((Tstorageloc292812) 3);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0));
res_555042_839829468 += ((NI) 1);
} LA10: ;
}
}
gcusage_554439_839829468(t0);
}
/* Machine-generated C (Nim compiler C backend).
 * genarrtoseq: converts an array value (sons[1] of `t0`) into a seq.
 * If sons[1] is itself a constructor node (kind 41, presumably nkBracket),
 * its type is retargeted to the seq type and the work is delegated to
 * genseqconstr.  Otherwise: allocate the destination seq with the array's
 * static length L, evaluate the array once, then emit an element-by-element
 * genassignment from "arr[i]" (format T839829468_138) into
 * "dest->data[i]" (format T839829468_187). */
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 elem0;
Tloc292816 a0;
Tloc292816 arr0;
NI L0;
NI64 LOC9;
Ropeobj178006* LOC10;
{ memset((void*)(&elem0), 0, sizeof(elem0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* literal array constructor: treat it as a seq constructor directly */
if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3;
asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
goto BeforeRet;
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA7: ;
/* static element count of the source array type */
LOC9 = (NI64)0;
LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
L0 = ((NI) (LOC9));
LOC10 = (Ropeobj178006*)0;
LOC10 = intliteral_539270_839829468(((NI64) (L0)));
gennewseqaux_554795_839829468(p0, (*d0), LOC10);
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI i_555090_839829468;
NI HEX3Atmp_555103_839829468;
NI res_555106_839829468;
i_555090_839829468 = (NI)0;
HEX3Atmp_555103_839829468 = (NI)0;
HEX3Atmp_555103_839829468 = (NI)(L0 - ((NI) 1));
res_555106_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
Ttype292840* LOC17;
Ttype292840* LOC18;
TY532811 LOC19;
if (!(res_555106_839829468 <= HEX3Atmp_555103_839829468)) goto LA13;
i_555090_839829468 = res_555106_839829468;
/* destination element loc: dest->data[i] */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3));
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*d0));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
elem0.s = ((Tstorageloc292812) 3);
/* source element loc: arr[i], inheriting the array's storage class */
LOC17 = (Ttype292840*)0;
LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
LOC18 = (Ttype292840*)0;
LOC18 = elemtype_320394_3876443242(LOC17);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468(a0);
LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
genassignment_539264_839829468(p0, elem0, arr0, 3);
res_555106_839829468 += ((NI) 1);
} LA13: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler C backend).
 * gendeepcopy: emits a deep copy of `src0` into `dest0`, dispatching on
 * the destination's (skipped) type kind:
 *  - first group:   runtime deep-copy call taking (&dest, &src, typeinfo)
 *                   (format T839829468_519);
 *  - second group:  variant taking (&dest, srcValue, typeinfo) -- presumably
 *                   ref-like types (format T839829468_520);
 *  - third group:   another address/address/typeinfo variant
 *                   (format T839829468_521);
 *  - kind 19:       if the type maps to C-type kind 17 (presumably a fixed
 *                   array) emit a memcpy with the static size, else a plain
 *                   "dest = src" assignment (format T839829468_123);
 *  - scalar kinds:  plain assignment;
 *  - default:       internal error "...cannot deep copy: <kind>".
 * NOTE(review): the enum-ordinal groupings are inferred; confirm against
 * the Ttypekind292244 declaration elsewhere in this file. */
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816 dest0, Tloc292816 src0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059(dest0.t, IL64(211106242013440));
switch ((*ty0).kind) {
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 25):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY535238 LOC2;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = addrloc_538204_839829468(dest0);
LOC2[1] = addrloc_538204_839829468(src0);
LOC2[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
TY535238 LOC4;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = addrloc_538204_839829468(dest0);
/* note: src is read by value here, not by address */
LOC4[1] = rdloc_538188_839829468(src0);
LOC4[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY535238 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = addrloc_538204_839829468(dest0);
LOC6[1] = addrloc_538204_839829468(src0);
LOC6[2] = gentypeinfo_535941_839829468((*p0).module, dest0.t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
}
break;
case ((Ttypekind292244) 19):
{
{
Tctypekind529007 LOC10;
TY535238 LOC13;
NI64 LOC14;
LOC10 = (Tctypekind529007)0;
LOC10 = maptype_533393_839829468(ty0);
/* maps to a C array: must memcpy, cannot assign */
if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468(dest0);
LOC13[1] = rdloc_538188_839829468(src0);
LOC14 = (NI64)0;
LOC14 = getsize_320135_3876443242(dest0.t);
LOC13[2] = rope_178401_2381377266(LOC14);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
}
goto LA8;
LA11: ;
{
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(dest0);
LOC16[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
}
LA8: ;
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
/* value types with no embedded references: plain assignment suffices */
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(dest0);
LOC18[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC20, ((NimStringDesc*) &T839829468_522));
appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC20);
}
break;
}
}
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
switch (op0) {
case ((Tmagic292524) 127):
case ((Tmagic292524) 126):
{
genandor_554311_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 99) ... ((Tmagic292524) 117):
{
unaryarith_552646_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 96) ... ((Tmagic292524) 98):
{
unaryarithoverflow_551633_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 52) ... ((Tmagic292524) 55):
{
binaryfloatarith_556728_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 56) ... ((Tmagic292524) 93):
{
binaryarith_551819_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 95):
{
geneqproc_552214_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 45) ... ((Tmagic292524) 51):
{
binaryarithoverflow_551262_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 149):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 259):
{
gengettypeinfo_555383_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 156):
{
genswap_555638_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 25):
{
{
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
}
LA12: ;
}
break;
case ((Tmagic292524) 26):
case ((Tmagic292524) 27):
{
Ttype292840* underlying0;
underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0));
if (LOC20) goto LA21;
LOC20 = ((*underlying0).kind >= ((Ttypekind292244) 40) && (*underlying0).kind <= ((Ttypekind292244) 44));
LA21: ;
if (!LOC20) goto LA22;
binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]);
}
goto LA18;
LA22: ;
{
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ranged0;
Ropeobj178006* res0;
NimStringDesc* LOC25;
TY532811 LOC31;
Ropeobj178006* LOC32;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
LOC25 = (NimStringDesc*)0;
{
if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28;
LOC25 = copyString(fun64_557055_839829468[(op0)- 26]);
}
goto LA26;
LA28: ;
{
LOC25 = copyString(fun_557060_839829468[(op0)- 26]);
}
LA26: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, a0, b0, LOC25);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = gettypedesc_535671_839829468((*p0).module, ranged0);
LOC31[1] = res0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0));
}
LA18: ;
}
break;
case ((Tmagic292524) 138):
{
genstrconcat_554452_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 144):
{
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
}
break;
case ((Tmagic292524) 145):
{
genstrappend_554554_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 146):
{
genseqelemappend_554683_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 128):
{
genstrequals_556666_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 129):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
}
break;
case ((Tmagic292524) 130):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
}
break;
case ((Tmagic292524) 157):
{
genisnil_552620_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 120):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
}
break;
case ((Tmagic292524) 121):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
}
break;
case ((Tmagic292524) 119):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
}
break;
case ((Tmagic292524) 118):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
}
break;
case ((Tmagic292524) 122):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
}
break;
case ((Tmagic292524) 123):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
}
break;
case ((Tmagic292524) 124):
{
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Tmagic292524) 125):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 12):
{
genof_555331_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 29):
{
gennew_554782_839829468(p0, e0);
}
break;
case ((Tmagic292524) 30):
{
gennewfinalize_555110_839829468(p0, e0);
}
break;
case ((Tmagic292524) 31):
{
gennewseq_554824_839829468(p0, e0);
}
break;
case ((Tmagic292524) 32):
{
gennewseqofcap_554836_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 9):
{
Ttype292840* t0;
TY178507 LOC55;
Ropeobj178006* LOC56;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = gettypedesc_535671_839829468((*p0).module, t0);
LOC56 = (Ropeobj178006*)0;
LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0));
}
break;
case ((Tmagic292524) 42):
{
gensomecast_556480_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 28):
{
genord_556474_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 35):
case ((Tmagic292524) 8):
case ((Tmagic292524) 34):
case ((Tmagic292524) 36):
case ((Tmagic292524) 33):
{
genarraylen_555415_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 37):
case ((Tmagic292524) 38):
{
{
NIM_BOOL LOC63;
LOC63 = (NIM_BOOL)0;
LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC63) goto LA64;
LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA64: ;
if (!!(LOC63)) goto LA65;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
}
goto LA61;
LA65: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
}
LA61: ;
}
break;
case ((Tmagic292524) 43):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
}
break;
case ((Tmagic292524) 44):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
}
break;
case ((Tmagic292524) 151):
{
gensetlengthstr_555632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 152):
{
gensetlengthseq_555500_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 39):
case ((Tmagic292524) 40):
case ((Tmagic292524) 41):
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
case ((Tmagic292524) 131):
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 148):
{
gensetop_556419_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 161):
case ((Tmagic292524) 162):
case ((Tmagic292524) 159):
case ((Tmagic292524) 160):
case ((Tmagic292524) 150):
case ((Tmagic292524) 163):
{
Tsym292834* opr0;
opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NimStringDesc* LOC78;
Ropeobj178006* LOC79;
if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76;
LOC78 = (NimStringDesc*)0;
LOC78 = HEX24_178856_2381377266((*opr0).loc.r);
LOC79 = (Ropeobj178006*)0;
LOC79 = cgsym_532403_839829468((*p0).module, LOC78);
}
LA76: ;
gencall_543632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 164):
{
genreset_554731_839829468(p0, e0);
}
break;
case ((Tmagic292524) 17):
{
Tnode292802* LOC82;
Tnode292802* LOC83;
LOC82 = (Tnode292802*)0;
LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC83 = (Tnode292802*)0;
LOC83 = skipconv_328882_3876443242(LOC82);
genecho_554369_839829468(p0, LOC83);
}
break;
case ((Tmagic292524) 158):
{
genarrtoseq_555046_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 223) ... ((Tmagic292524) 257):
case ((Tmagic292524) 19) ... ((Tmagic292524) 24):
{
localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
}
break;
case ((Tmagic292524) 208):
{
Tnode292802* n0;
n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 155):
{
Tnode292802* n0;
n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0);
expr_539248_839829468(p0, n0, d0);
}
break;
case ((Tmagic292524) 209):
{
Tloc292816 a0;
Tloc292816 b0;
Tnode292802* x0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
Tnode292802* LOC91;
Tnode292802* LOC94;
LOC91 = (Tnode292802*)0;
LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92;
LOC94 = (Tnode292802*)0;
LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0));
}
goto LA89;
LA92: ;
{
x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
}
LA89: ;
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
gendeepcopy_550374_839829468(p0, a0, b0);
}
break;
case ((Tmagic292524) 140):
case ((Tmagic292524) 94):
{
gencall_543632_839829468(p0, e0, d0);
}
break;
default:
{
NimStringDesc* LOC98;
LOC98 = (NimStringDesc*)0;
LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14);
appendString(LOC98, ((NimStringDesc*) &T839829468_523));
appendString(LOC98, reprEnum((NI)op0, (&NTI292524)));
internalerror_196100_155036129((*e0).info, LOC98);
}
break;
}
}
/* Emit the C data for a Nim set-literal node `n0` and return a rope that
 * reads it.  Sets of more than 8 bytes are emitted once into the module's
 * data section (deduplicated through the module's datacache) and referenced
 * by a generated name; smaller sets are returned inline as raw set data. */
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    Ropeobj178006* result0 = (Ropeobj178006*)0;
    Tbitset339004* cs0 = (Tbitset339004*)0;
    NI64 byteSize64 = getsize_320135_3876443242((*n0).typ);
    NI size0 = ((NI) (byteSize64));
    tobitset_340001_452470228(n0, (&cs0));
    if (((NI) 8) < size0) {
        /* Large set: name it tmpbase&id; the datacache lookup yields the
         * module's current label counter for a node not seen before. */
        NI id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
        Ropeobj178006* idRope = rope_178401_2381377266(((NI64) (id0)));
        result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, idRope);
        if (id0 == ((NI) ((*(*p0).module).labels))) {
            /* First occurrence: emit the definition into file section 8. */
            TY535238 fmtArgs;
            (*(*p0).module).labels += ((NI) 1);
            memset((void*)fmtArgs, 0, sizeof(fmtArgs));
            fmtArgs[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
            fmtArgs[1] = result0;
            fmtArgs[2] = genrawsetdata_549629_839829468(cs0, size0);
            addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), fmtArgs, 3);
        }
    } else {
        /* Small set: the raw data expression is usable directly. */
        result0 = genrawsetdata_549629_839829468(cs0, size0);
    }
    return result0;
}
/* Emit C code that evaluates a Nim set-constructor expression `e0` into
 * destination loc `d0`.  Three strategies:
 *  1. Node flagged as constant (flag bit 4): emit static set data via
 *     gensetnode and copy the resulting rope into the destination.
 *  2. Sets larger than 8 bytes: zero the destination (via a string.h-backed
 *     format, hence usestringh), then set bits element by element.
 *  3. Sets of <= 8 bytes: build the set in an integer-sized value; the
 *     per-element format strings are assembled at compile time around `ts0`.
 * Range elements (node kind 44) get a loop over a temporary index `idx0`;
 * plain elements set a single bit. */
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 idx0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&idx0), 0, sizeof(idx0));
{
Ropeobj178006* LOC5;
/* Case 1: constant set expression — emit as static data. */
if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = gensetnode_549664_839829468(p0, e0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
{
/* Non-constant: make sure we have a destination temp to build into. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
}
LA9: ;
{
NI64 LOC13;
TY178507 LOC16;
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242((*e0).typ);
/* Case 2: big set (> 8 bytes), built as a byte array. */
if (!(IL64(8) < LOC13)) goto LA14;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((*d0));
/* Presumably zero-initializes the destination set — confirm T..._525. */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
{
NI i_557537_839829468;
NI HEX3Atmp_557603_839829468;
NI LOC18;
NI res_557606_839829468;
i_557537_839829468 = (NI)0;
HEX3Atmp_557603_839829468 = (NI)0;
LOC18 = (NI)0;
LOC18 = sonslen_295351_850551059(e0);
HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1));
res_557606_839829468 = ((NI) 0);
{
/* For each constructor element: range (kind 44) -> bit-set loop,
 * otherwise set a single bit. */
while (1) {
if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20;
i_557537_839829468 = res_557606_839829468;
{
Ttype292840* LOC25;
TY535235 LOC26;
if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23;
LOC25 = (Ttype292840*)0;
LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468(idx0);
LOC26[1] = rdloc_538188_839829468((*d0));
LOC26[2] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
LOC26[3] = rdsetelemloc_555662_839829468(b0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
}
goto LA21;
LA23: ;
{
TY532811 LOC28;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0));
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = rdloc_538188_839829468((*d0));
LOC28[1] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
}
LA21: ;
res_557606_839829468 += ((NI) 1);
} LA20: ;
}
}
}
goto LA11;
LA14: ;
{
/* Case 3: small set (<= 8 bytes), built in an integer. */
NimStringDesc* ts0;
NimStringDesc* LOC30;
NI64 LOC31;
NimStringDesc* LOC32;
TY178507 LOC33;
LOC30 = (NimStringDesc*)0;
LOC31 = (NI64)0;
LOC31 = getsize_320135_3876443242((*e0).typ);
LOC32 = (NimStringDesc*)0;
LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
LOC30 = rawNewString(LOC32->Sup.len + 2);
appendString(LOC30, ((NimStringDesc*) &T839829468_45));
appendString(LOC30, LOC32);
/* ts0 = <T..._45> & (size-in-bits) — presumably the sized integer type
 * name used inside the emitted bit operations; confirm the rope table. */
ts0 = LOC30;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468((*d0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
{
NI i_557575_839829468;
NI HEX3Atmp_557611_839829468;
NI LOC35;
NI res_557614_839829468;
i_557575_839829468 = (NI)0;
HEX3Atmp_557611_839829468 = (NI)0;
LOC35 = (NI)0;
LOC35 = sonslen_295351_850551059(e0);
HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1));
res_557614_839829468 = ((NI) 0);
{
/* Same element loop as the big-set case, but the format strings are
 * concatenated per element with ts0 spliced in. */
while (1) {
if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37;
i_557575_839829468 = res_557614_839829468;
{
Ttype292840* LOC42;
NimStringDesc* LOC43;
TY535235 LOC44;
if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40;
LOC42 = (Ttype292840*)0;
LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
LOC43 = (NimStringDesc*)0;
LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
appendString(LOC43, ((NimStringDesc*) &T839829468_528));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_529));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468(idx0);
LOC44[1] = rdloc_538188_839829468((*d0));
LOC44[2] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
LOC44[3] = rdsetelemloc_555662_839829468(b0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4);
}
goto LA38;
LA40: ;
{
NimStringDesc* LOC46;
TY532811 LOC47;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0));
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
appendString(LOC46, ((NimStringDesc*) &T839829468_530));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_531));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = rdloc_538188_839829468((*d0));
LOC47[1] = rdsetelemloc_555662_839829468(a0, (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2);
}
LA38: ;
res_557614_839829468 += ((NI) 1);
} LA37: ;
}
}
}
LA11: ;
}
LA1: ;
}
/* Emit a complex constant `n0`: the value is stored once in the module's
 * data section under a tmpbase&id name (deduplicated via the datacache),
 * then the destination loc `d0` is either pointed at that static data or
 * assigned from it. */
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Ttype292840* t0 = getuniquetype_528640_2036603609((*n0).typ);
    /* Result intentionally discarded: the call registers the type descriptor. */
    (void) gettypedesc_535671_839829468((*p0).module, t0);
    NI id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
    Ropeobj178006* tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, rope_178401_2381377266(((NI64) (id0))));
    if (id0 == ((NI) ((*(*p0).module).labels))) {
        /* First occurrence of this node: emit the definition (section 8). */
        TY535238 fmtArgs;
        (*(*p0).module).labels += ((NI) 1);
        memset((void*)fmtArgs, 0, sizeof(fmtArgs));
        fmtArgs[0] = gettypedesc_535671_839829468((*p0).module, t0);
        fmtArgs[1] = tmp0;
        fmtArgs[2] = genconstexpr_554849_839829468(p0, n0);
        addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), fmtArgs, 3);
    }
    if ((*d0).k == ((Tlockind292808) 0)) {
        /* No destination yet: point it straight at the static constant. */
        fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1));
    } else {
        putdataintodest_550436_839829468(p0, d0, t0, tmp0);
        /* For all but type kinds 24/28, mark the destination as static storage. */
        if (!((*t0).kind == ((Ttypekind292244) 24) || (*t0).kind == ((Ttypekind292244) 28))) {
            (*d0).s = ((Tstorageloc292812) 1);
        }
    }
}
/* If `n0` is a deep constant expression and the destination `d0` is still
 * unassigned, materialize the constant once in the data section and point
 * `d0` at it.  Returns true when the constant path was taken. */
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    NIM_BOOL eligible = ((*d0).k == ((Tlockind292808) 0));
    if (eligible) {
        /* Require at least one son beyond the first relevant index
         * (index 1 when node kind == 38, else 0). */
        NI sonCount = len_293081_850551059(n0);
        eligible = ((((NI) (((*n0).kind == ((Tnodekind292020) 38))))) < sonCount);
    }
    if (eligible) {
        eligible = isdeepconstexpr_318566_2616423590(n0);
    }
    if (!eligible) {
        return NIM_FALSE;
    }
    Ttype292840* t0 = getuniquetype_528640_2036603609((*n0).typ);
    /* Result intentionally discarded: the call registers the type descriptor. */
    (void) gettypedesc_535671_839829468((*p0).module, t0);
    NI id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
    Ropeobj178006* constName = HEX26_178418_2381377266((*(*p0).module).tmpbase, rope_178401_2381377266(((NI64) (id0))));
    fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, constName, ((Tstorageloc292812) 1));
    if (id0 == ((NI) ((*(*p0).module).labels))) {
        /* First occurrence: emit the constant's definition (section 8). */
        TY535238 fmtArgs;
        (*(*p0).module).labels += ((NI) 1);
        memset((void*)fmtArgs, 0, sizeof(fmtArgs));
        fmtArgs[0] = gettypedesc_535671_839829468((*p0).module, t0);
        fmtArgs[1] = (*d0).r;
        fmtArgs[2] = genconstexpr_554849_839829468(p0, n0);
        addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), fmtArgs, 3);
    }
    return NIM_TRUE;
}
/* Emit code for an array constructor `n0` into destination `d0`.
 * Tries the static-constant fast path first; otherwise assigns each
 * element through an indexed sub-loc of the destination. */
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 arr0;
    memset((void*)(&arr0), 0, sizeof(arr0));
    if (!handleconstexpr_554853_839829468(p0, n0, d0)) {
        if ((*d0).k == ((Tlockind292808) 0)) {
            /* No destination yet: build into a fresh temporary. */
            gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
        }
        NI count = sonslen_295351_850551059(n0);
        NI last = (NI)(count - ((NI) 1));
        for (NI idx = ((NI) 0); idx <= last; idx += ((NI) 1)) {
            Ttype292840* skipped = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
            Ttype292840* elemT = elemtype_320394_3876443242(skipped);
            TY532811 fmtArgs;
            initloc_532273_839829468((&arr0), ((Tlockind292808) 6), elemT, (*d0).s);
            memset((void*)fmtArgs, 0, sizeof(fmtArgs));
            fmtArgs[0] = rdloc_538188_839829468((*d0));
            fmtArgs[1] = intliteral_539270_839829468(((NI64) (idx)));
            /* arr0 reads as "<dest>[idx]" (format T..._138). */
            arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), fmtArgs, 2);
            expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[idx], (&arr0));
        }
    }
}
/* Emit code for a tuple constructor `n0` into destination `d0`.
 * Tries the static-constant fast path first; otherwise assigns each
 * field through a positional sub-loc of the destination. */
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 rec0;
    memset((void*)(&rec0), 0, sizeof(rec0));
    if (!handleconstexpr_554853_839829468(p0, n0, d0)) {
        Ttype292840* t0 = getuniquetype_528640_2036603609((*n0).typ);
        /* Result intentionally discarded: registers the type descriptor. */
        (void) gettypedesc_535671_839829468((*p0).module, t0);
        if ((*d0).k == ((Tlockind292808) 0)) {
            gettemp_537032_839829468(p0, t0, d0, NIM_FALSE);
        }
        NI count = sonslen_295351_850551059(n0);
        NI last = (NI)(count - ((NI) 1));
        for (NI idx = ((NI) 0); idx <= last; idx += ((NI) 1)) {
            Tnode292802* it0 = (*n0).kindU.S6.sons->data[idx];
            TY532811 fmtArgs;
            if ((*it0).kind == ((Tnodekind292020) 34)) {
                /* Keyed entry (kind 34): the value sits in son 1. */
                it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
            }
            initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s);
            memset((void*)fmtArgs, 0, sizeof(fmtArgs));
            fmtArgs[0] = rdloc_538188_839829468((*d0));
            fmtArgs[1] = rope_178401_2381377266(((NI64) (idx)));
            /* rec0 reads as the idx-th field of <dest> (format T..._185). */
            rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), fmtArgs, 2);
            expr_539248_839829468(p0, it0, (&rec0));
        }
    }
}
/* Walk the object type hierarchy starting at `ty_553156_839829468` until the
 * record slot for `field0` is found.  While climbing to a base type, appends
 * an access component (T..._153) to `*r0` — except in C++-ish mode, where the
 * flat layout presumably makes it unnecessary.  Internal error if the field
 * is never found. */
N_NIMCALL(Tsym292834*, lookupfieldagain_553153_839829468)(Tcproc529021* p0, Ttype292840* ty_553156_839829468, Tsym292834* field0, Ropeobj178006** r0) {
    Tsym292834* result0 = (Tsym292834*)0;
    Ttype292840* ty0 = ty_553156_839829468;
    while (!(ty0 == NIM_NIL)) {
        NIM_BOOL cppMode;
        ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360));
        result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name);
        if (!(result0 == NIM_NIL)) {
            break;
        }
        /* cmd == 2 or module flag bit 27 set -> C++-style codegen mode. */
        cppMode = (gcmd_169132_2607990831 == ((Tcommands169076) 2)) ||
            ((((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0));
        if (!cppMode) {
            add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
        }
        /* Not in this record: climb to the base type (son 0). */
        ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]);
    }
    if (result0 == NIM_NIL) {
        internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
    }
    return result0;
}
/* Emit run-time discriminant checks for a case-object field access.
 * `e0` holds the check expressions (sons 1..n), `obj0` is the rope for the
 * object being accessed, `field0` the accessed field, `origty0` its object
 * type.  For each check: evaluate the set membership of the discriminant,
 * then emit a raise via format T..._534 (op magic 99, presumably the negated
 * form — confirm) or T..._535.  The field-name string literal is cached in
 * the module's datacache so repeated checks share one literal. */
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) {
Tloc292816 test0;
Tloc292816 u0;
Tloc292816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_553525_839829468;
NI HEX3Atmp_554039_839829468;
NI LOC2;
NI res_554042_839829468;
i_553525_839829468 = (NI)0;
HEX3Atmp_554039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1));
/* Checks start at son 1 (son 0 is not a check). */
res_554042_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tsym292834* op0;
Tnode292802* disc0;
Ropeobj178006* o0;
Tsym292834* d0;
NI id0;
Tnode292802* LOC9;
Ropeobj178006* strlit0;
if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4;
i_553525_839829468 = res_554042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_553525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* Magic 99: unwrap to the inner check expression (son 1). */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
/* Son 2 is the discriminant reference; son 1 the value-set operand. */
disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2));
initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
o0 = obj0;
/* Resolve the discriminant field, extending o0 with any base-type path. */
d0 = lookupfieldagain_553153_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0));
v0.r = o0;
add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&v0.r, (*d0).loc.r);
/* Emits the set-membership test into test0. */
geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0));
LOC9 = (Tnode292802*)0;
LOC9 = newstrnode_293678_850551059(((Tnodekind292020) 20), (*(*field0).name).s);
/* Cache the field-name string literal by node identity. */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
/* Already emitted: reference the cached literal tmpbase&id. */
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = rope_178401_2381377266(((NI64) (id0)));
strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY532811 LOC20;
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(test0);
LOC20[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY532811 LOC22;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_538188_839829468(test0);
LOC22[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_554042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Emit code for an object constructor `e0` into destination `d0`.
 * Fast path: fully constant expressions are handled by handleconstexpr and
 * return early.  Otherwise builds the object in a temporary `tmp0`:
 * ref objects (type kind 22) are heap-allocated via rawgennew and accessed
 * through a deref rope (format T..._124); value objects are zero-constructed
 * in place.  Each keyed son (1..n) is then assigned through the located
 * field, with optional discriminant checks, and finally the temporary is
 * copied into `d0`. */
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 tmp0;
Ttype292840* t0;
NIM_BOOL isref0;
Ropeobj178006* r0;
Ropeobj178006* LOC13;
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
/* Constant object: already materialized, nothing more to do. */
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256));
gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE);
isref0 = ((*t0).kind == ((Ttypekind292244) 22));
r0 = rdloc_538188_839829468(tmp0);
{
Ttype292840* LOC10;
TY178507 LOC11;
if (!isref0) goto LA8;
/* ref object: allocate, then switch t0 to the pointed-to object type
 * and wrap r0 in a dereference (format T..._124). */
rawgennew_554741_839829468(p0, tmp0, NIM_NIL);
LOC10 = (Ttype292840*)0;
LOC10 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_554439_839829468(e0);
}
goto LA6;
LA8: ;
{
/* Value object: default-construct the temporary in place. */
constructloc_538388_839829468(p0, tmp0, NIM_FALSE);
}
LA6: ;
/* Result intentionally unused: registers the type descriptor. */
LOC13 = (Ropeobj178006*)0;
LOC13 = gettypedesc_535671_839829468((*p0).module, t0);
ty0 = getuniquetype_528640_2036603609(t0);
{
NI i_554944_839829468;
NI HEX3Atmp_554997_839829468;
NI LOC15;
NI res_555000_839829468;
i_554944_839829468 = (NI)0;
HEX3Atmp_554997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_293081_850551059(e0);
HEX3Atmp_554997_839829468 = (LOC15 - 1);
/* Field assignments start at son 1 (son 0 is the type). */
res_555000_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tloc292816 tmp20;
Tsym292834* field0;
if (!(res_555000_839829468 <= HEX3Atmp_554997_839829468)) goto LA17;
i_554944_839829468 = res_555000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_554944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
/* Locate the field symbol; tmp20.r accumulates the base-type path. */
field0 = lookupfieldagain_553153_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_293081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
/* Option bit 2 set -> emit discriminant field checks (son 2). */
LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind292808) 1);
tmp20.t = (*field0).loc.t;
{
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc292812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc292812) 2);
}
LA29: ;
/* Evaluate the field's value (son 1) directly into the field loc. */
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_555000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
/* No destination yet: the temporary's loc becomes the destination
 * (genericAssign copies the Tloc record itself, not the object). */
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA34;
LA36: ;
{
genassignment_539264_839829468(p0, (*d0), tmp0, 0);
}
LA34: ;
}BeforeRet: ;
}
/* Emit code for a Nim `cast` expression `e0` into destination `d0`.
 * When either side is one of the "tricky" type kinds (36..39, 18, 17, 16, 4),
 * the cast is routed through a named local declared via format T..._537 —
 * presumably a union used for bit-level reinterpretation (confirm against
 * the rope table); the operand is written into it and the result read back
 * out via T..._538.  All other casts fall back to gensomecast. */
N_NIMCALL(void, gencast_556537_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
    Ttype292840* destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
    Ttype292840* srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
    NIM_BOOL needsHelper =
        ((*destt0).kind >= ((Ttypekind292244) 36) && (*destt0).kind <= ((Ttypekind292244) 39) || (*destt0).kind == ((Ttypekind292244) 18) || (*destt0).kind == ((Ttypekind292244) 17) || (*destt0).kind == ((Ttypekind292244) 16) || (*destt0).kind == ((Ttypekind292244) 4)) ||
        ((*srct0).kind >= ((Ttypekind292244) 36) && (*srct0).kind <= ((Ttypekind292244) 39) || (*srct0).kind == ((Ttypekind292244) 18) || (*srct0).kind == ((Ttypekind292244) 17) || (*srct0).kind == ((Ttypekind292244) 16) || (*srct0).kind == ((Ttypekind292244) 4));
    if (needsHelper) {
        Tloc292816 tmp0;
        Ropeobj178006* lbl0;
        Ropeobj178006* readBack;
        TY178507 oneArg;
        TY535238 threeArgs;
        /* Fresh label for the helper local. */
        (*p0).labels += ((NI) 1);
        lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
        memset((void*)(&tmp0), 0, sizeof(tmp0));
        memset((void*)oneArg, 0, sizeof(oneArg));
        oneArg[0] = lbl0;
        tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), oneArg, 1);
        memset((void*)threeArgs, 0, sizeof(threeArgs));
        threeArgs[0] = gettypedesc_535671_839829468((*p0).module, srct0);
        threeArgs[1] = gettypedesc_535671_839829468((*p0).module, destt0);
        threeArgs[2] = lbl0;
        /* Declare the helper in the proc's declaration section (0). */
        linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), threeArgs, 3);
        tmp0.k = ((Tlockind292808) 6);
        tmp0.t = srct0;
        tmp0.s = ((Tstorageloc292812) 2);
        tmp0.flags = 0;
        /* Write the source operand into the helper... */
        expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
        /* ...then read it back as the destination type. */
        memset((void*)oneArg, 0, sizeof(oneArg));
        oneArg[0] = lbl0;
        readBack = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), oneArg, 1);
        putintodest_550468_839829468(p0, d0, (*e0).typ, readBack, tmp0.s);
    } else {
        gensomecast_556480_839829468(p0, e0, d0);
    }
}
/* Emit code for a Nim type conversion `e0` into `d0`.  If the destination
 * type compares equal (distinct-compare mode 1) to the operand's type, the
 * conversion is a no-op and the operand is emitted directly; otherwise a
 * real cast is generated. */
N_NIMCALL(void, genconv_556632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
    Ttype292840* desttype0 = skiptypes_296099_850551059((*e0).typ, 8390656);
    NIM_BOOL sameType = comparetypes_326214_3876443242(desttype0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, ((Tdistinctcompare324427) 1), 0);
    if (sameType) {
        expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
    } else {
        gensomecast_556480_839829468(p0, e0, d0);
    }
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `isCppRef` — predicate used when
 * targeting C++ to decide whether a `ref` type is lowered to a C++ reference.
 * Result: true when (compiling as C++ command OR module has a specific flag)
 * AND the skipped type kind is 23 AND a specific type flag (bit 18) is absent.
 * Exact enum meanings are not visible here — TODO confirm. Do not hand-edit. */
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
NIM_BOOL LOC2;
NIM_BOOL LOC3;
Ttype292840* LOC6;
Ttype292840* LOC8;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC2 = (NIM_BOOL)0;
LOC3 = (NIM_BOOL)0;
/* Short-circuit OR: global command == 2 (presumably "compile to C++") ... */
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
/* ... or module symbol flag bit 27 is set. */
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
LOC2 = LOC3;
if (!(LOC2)) goto LA5;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC2 = ((*LOC6).kind == ((Ttypekind292244) 23));
LA5: ;
LOC1 = LOC2;
if (!(LOC1)) goto LA7;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
LOC1 = !((((*LOC8).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA7: ;
result0 = LOC1;
return result0;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genAddr` — generates code for an
 * address-of expression. Three branches: (1) operand type kind 22/21
 * (presumably ref/ptr-like): render via a format string; (2) mapped C type is
 * kind 17 or the operand is a C++ reference: address-of is a no-op, emit the
 * operand directly; (3) general case: take the address of the operand's loc.
 * Do not hand-edit; regenerate from the Nim compiler instead. */
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
Tloc292816 a0;
Ropeobj178006* LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC3).kind == ((Ttypekind292244) 22) || (*LOC3).kind == ((Ttypekind292244) 21))) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind529007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind529007)0;
LOC9 = maptype_533393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind529007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc292816 a0;
Ropeobj178006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj178006*)0;
LOC14 = addrloc_538204_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genArrayElem` — generates code for
 * fixed-size array indexing. Evaluates the array (x0) and index (y0) into
 * locs, optionally emits a runtime or compile-time index check when option
 * bit 4 (presumably range/bound checks) is on, then renders the element
 * access (index rebased by the array's first ordinal). Do not hand-edit;
 * regenerate from the Nim compiler instead. */
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC1;
Ropeobj178006* first0;
NI64 LOC2;
Ttype292840* LOC47;
Ttype292840* LOC48;
TY535238 LOC49;
Ropeobj178006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320));
LOC2 = (NI64)0;
/* First ordinal of the array's index type — used to rebase the C index. */
LOC2 = firstord_320001_3876443242(ty0);
first0 = intliteral_539270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
/* Checks enabled (option bit 4) and type not flagged exempt (flag bit 0)? */
LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_318510_2616423590(y0);
/* Non-constant index: emit a runtime bound check ... */
if (!!(LOC11)) goto LA12;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_320001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
{
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY532811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_320001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_320001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_320004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_320004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
/* Only emit the check when the index type's range can exceed the array's. */
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_538227_839829468(b0);
LOC30 = (NI64)0;
LOC30 = lastord_320004_3876443242(ty0);
LOC29[1] = intliteral_539270_839829468(LOC30);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
{
/* Non-zero first ordinal: check against both bounds. */
TY535238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_538227_839829468(b0);
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_320004_3876443242(ty0);
LOC32[2] = intliteral_539270_839829468(LOC33);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
{
/* ... constant index: check at compile time and report a local error
 * (message kind 86) if it is outside the array's ordinal range. */
NI64 idx0;
idx0 = getordvalue_320129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_320001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
/* Propagate the array's storage class to an as-yet-unassigned destination. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype292840*)0;
LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype292840*)0;
LOC48 = elemtype_320394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468(a0);
LOC49[1] = rdcharloc_538227_839829468(b0);
LOC49[2] = first0;
LOC50 = (Ropeobj178006*)0;
LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genOpenArrayElem` — indexing into an
 * openarray parameter. Optionally emits a runtime index check against the
 * openarray's length (option bit 4, presumably bound checks), then renders
 * `a[b]` via a format template. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* LOC10;
Ttype292840* LOC11;
TY532811 LOC12;
Ropeobj178006* LOC13;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
{
TY532811 LOC5;
/* Runtime bound check when option bit 4 is enabled. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(b0);
LOC5[1] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), LOC5, 2);
}
LA3: ;
{
/* Propagate storage class to an unassigned destination loc. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
(*d0).s = a0.s;
}
LA8: ;
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC11 = (Ttype292840*)0;
LOC11 = elemtype_320394_3876443242(LOC10);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(a0);
LOC12[1] = rdcharloc_538227_839829468(b0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC12, 2);
putintodest_550468_839829468(p0, d0, LOC11, LOC13, a0.s);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genSeqElem` — indexing into a
 * seq or string. Optionally emits a runtime index check (with a distinct
 * format for type kind 28, presumably string vs. seq), marks the result's
 * storage, dereferences when the outer type is ref/ptr-like (kind 22/21),
 * then renders the element access. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC27;
Ttype292840* LOC28;
TY532811 LOC29;
Ropeobj178006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
Ttype292840* LOC5;
/* Skip through a ref/ptr wrapper (kind 22/21) to the underlying seq type. */
if (!((*ty0).kind == ((Ttypekind292244) 22) || (*ty0).kind == ((Ttypekind292244) 21))) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
/* Runtime bound check when option bit 4 is enabled. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8;
{
TY535238 LOC14;
if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(b0);
LOC14[1] = rdloc_538188_839829468(a0);
LOC14[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY535238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(b0);
LOC16[1] = rdloc_538188_839829468(a0);
LOC16[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
/* An unassigned destination loc gets storage class 3 (heap-backed,
 * presumably — TODO confirm enum meaning). */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA19;
(*d0).s = ((Tstorageloc292812) 3);
}
LA19: ;
{
Ttype292840* LOC23;
TY178507 LOC26;
LOC23 = (Ttype292840*)0;
LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
/* If still behind a ref/ptr, wrap the access rope in a deref template. */
if (!((*LOC23).kind == ((Ttypekind292244) 22) || (*LOC23).kind == ((Ttypekind292244) 21))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype292840*)0;
LOC28 = elemtype_320394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468(a0);
LOC29[1] = rdcharloc_538227_839829468(b0);
LOC30 = (Ropeobj178006*)0;
LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genCStringElem` — indexing into a
 * cstring. No bound check is emitted (cstring length is unknown at codegen).
 * Renders `a[b]` via the same format template as openarray access. */
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC5;
Ttype292840* LOC6;
TY532811 LOC7;
Ropeobj178006* LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
/* Propagate storage class to an unassigned destination loc. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC6 = (Ttype292840*)0;
LOC6 = elemtype_320394_3876443242(LOC5);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(a0);
LOC7[1] = rdcharloc_538227_839829468(b0);
LOC8 = (Ropeobj178006*)0;
LOC8 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC7, 2);
putintodest_550468_839829468(p0, d0, LOC6, LOC8, a0.s);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genTupleElem` — access to a tuple
 * field by constant integer index. The index must be an integer-literal node
 * (node kinds 6..15, presumably the int-literal range); anything else is an
 * internal error. Appends a `.FieldN`-style suffix to the tuple's loc rope. */
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
NI i0;
Ropeobj178006* LOC5;
Ttype292840* ty0;
Ropeobj178006* r0;
TY178507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ropeobj178006*)0;
/* Ensure the tuple's C type is declared in the module (result unused). */
LOC5 = gettypedesc_535671_839829468((*p0).module, a0.t);
ty0 = getuniquetype_528640_2036603609(a0.t);
r0 = rdloc_538188_839829468(a0);
/* GCC case-range extension: integer-literal node kinds. */
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
{
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) (i0)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genBracketExpr` — dispatcher for
 * `a[b]` expressions. Skips a ref/ptr wrapper, then routes on the container's
 * type kind: 16/4 -> array, 27/48 -> openarray-like, 24/28 -> seq/string,
 * 29 -> cstring, 18 -> tuple; any other kind is an internal error with the
 * offending type kind appended to the message. Do not hand-edit. */
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
Ttype292840* LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 22) || (*ty0).kind == ((Ttypekind292244) 21))) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 29):
{
gencstringelem_554144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleelem_553124_839829468(p0, n0, d0);
}
break;
default:
{
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244)));
appendChar(LOC12, 41);
internalerror_196100_155036129((*n0).info, LOC12);
}
break;
}
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genDeref` — generates code for a
 * pointer/ref dereference. Control flow (goto-lowered from Nim if/elif):
 *  - If the mapped C type is in a small set (mask 393216 over ctype kinds)
 *    and the deref is not enforced, the deref is a no-op at the C level:
 *    emit the operand directly.
 *  - Otherwise evaluate the operand; several special cases exist for the
 *    C++-reference lowering (command == 2 or module flag bit 27), where an
 *    addr-of-deref pair is collapsed. Sets the destination's storage class
 *    by pointee kind (22/23/21); unknown kinds are an internal error.
 * The exact enum meanings are not visible here — TODO confirm against the
 * Nim compiler source. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) {
Tctypekind529007 mt0;
{ mt0 = maptype_533393_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype292840* LOC9;
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10;
(*d0).s = ((Tstorageloc292812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc292816 a0;
Ttype292840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode292802* LOC25;
Tnode292802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
/* C++-ref special case: pointee kind 23, flag bit 18 absent, C++ target,
 * and the operand is an addr node (kind 64) — collapse addr(deref(x)). */
LOC17 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
LOC26 = (Tnode292802*)0;
LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_539289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA30;
/* Destination not yet assigned: pick storage class from pointee kind. */
switch ((*typ0).kind) {
case ((Ttypekind292244) 22):
{
(*d0).s = ((Tstorageloc292812) 3);
}
break;
case ((Ttypekind292244) 23):
{
(*d0).s = ((Tstorageloc292812) 0);
{
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj178006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind292020) 65));
LA41: ;
if (!LOC36) goto LA42;
/* C++ references need no explicit `*` — pass the loc through. */
LOC44 = (Ropeobj178006*)0;
LOC44 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind292244) 21):
{
(*d0).s = ((Tstorageloc292812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj178006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind292020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj178006*)0;
LOC61 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj178006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind529007) 18));
LA65: ;
if (!LOC64) goto LA66;
LOC68 = (Ropeobj178006*)0;
LOC68 = rdloc_538188_839829468(a0);
putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
/* General case: wrap the loc in the deref format template. */
TY178507 LOC70;
Ropeobj178006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_538188_839829468(a0);
LOC71 = (Ropeobj178006*)0;
LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genRecordFieldAux` — shared setup
 * for record-field access: evaluates the object expression into `a0`,
 * asserts the field child is a symbol node (kind 3), propagates storage
 * class, and returns the unique (canonical) record type. */
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) {
Ttype292840* result0;
Ropeobj178006* LOC9;
result0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
{
if (!!(((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
(*d0).s = (*a0).s;
}
LA7: ;
LOC9 = (Ropeobj178006*)0;
/* Force the record's C type to be declared in the module (result unused). */
LOC9 = gettypedesc_535671_839829468((*p0).module, (*a0).t);
result0 = getuniquetype_528640_2036603609((*a0).t);
return result0;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genRecordField` — access `obj.field`.
 * Tuples (type kind 18) use positional `.FieldN` access; objects look up the
 * field symbol (walking inheritance via lookupfieldagain, which may prefix
 * `.Sup` chains into `r0`) and append its C name. Missing loc is an internal
 * error. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_538188_839829468(a0);
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY178507 LOC5;
if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
Tsym292834* field0;
TY178507 LOC11;
field0 = lookupfieldagain_553153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genCheckedRecordField` — field
 * access inside an object variant with a discriminator check. When option
 * bit 2 (presumably field checks) is enabled, emits the check via
 * genfieldcheck before the access; otherwise falls back to the plain
 * genrecordfield path. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
Tsym292834* field0;
TY178507 LOC9;
Ropeobj178006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_538188_839829468(a0);
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_553153_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj178006*)0;
LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_178482_2381377266(&r0, LOC10);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `startBlock` — opens a new codegen
 * block: emits the opening line, allocates a fresh label id, grows the
 * proc's block stack by one, and records the current try/except nesting
 * depths in the new entry. Returns the new block's stack index. */
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) {
NI result0;
result0 = (NI)0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0);
(*p0).labels += ((NI) 1);
result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) ((NI)(result0 + ((NI) 1)))));
(*p0).blocks->data[result0].id = ((NI) ((*p0).labels));
(*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
(*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
return result0;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `blockBody` — concatenates a block's
 * three sections (0, 1, 2) into one rope, inserting an extra formatted line
 * after section 0 when the block declared stack-frame slots (framelen > 0). */
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*b0).sections[(((Tcprocsection529011) 0))- 0];
{
TY178507 LOC5;
if (!(((NI16) 0) < (*b0).framelen)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*b0).framelen)));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1);
}
LA3: ;
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 1))- 0]);
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 2))- 0]);
return result0;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `endBlock(p, blockEnd)` — pops the
 * top block off the proc's block stack, splices its assembled body into the
 * parent block's section 2, then emits the supplied closing rope. */
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) {
NI topblock0;
Ropeobj178006* LOC1;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
LOC1 = (Ropeobj178006*)0;
LOC1 = blockbody_544025_839829468((&(*p0).blocks->data[topblock0]));
add_178482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], LOC1);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (topblock0)));
line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to the no-argument `endBlock` overload —
 * builds the closing rope itself: a labeled close when the top block has a
 * label, a plain close otherwise, plus a frame-pop line when framelen > 0;
 * then delegates to the rope-taking endblock above. */
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) {
NI topblock0;
Ropeobj178006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY178507 LOC5;
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY178507 LOC12;
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178401_2381377266(((NI64) (framelen0)));
addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_544035_839829468(p0, blockend0);
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genBlock` — codegen for a Nim
 * `block` statement/expression. Allocates a temp when the block yields a
 * value and the destination is unset, opens a C block, registers the block
 * label symbol (if named) so `break` can target it, generates the body, and
 * restores the previous break index. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI oldbreakidx_546099_839829468;
TY533289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_546099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym292834* sym0;
/* Named block: record the loc kind and 1-based block position on the
 * label symbol so `break label` can find it. */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind292808) 10);
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546099_839829468;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genStmtListExpr` — a statement
 * list whose last son is an expression: generate all sons but the last as
 * statements, then generate the last son into the destination loc. */
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI length0;
length0 = sonslen_295351_850551059(n0);
{
NI i_558420_839829468;
NI HEX3Atmp_558424_839829468;
NI res_558427_839829468;
i_558420_839829468 = (NI)0;
HEX3Atmp_558424_839829468 = (NI)0;
/* Loop over sons 0 .. length-2 (goto-lowered `for`). */
HEX3Atmp_558424_839829468 = (NI)(length0 - ((NI) 2));
res_558427_839829468 = ((NI) 0);
{
while (1) {
if (!(res_558427_839829468 <= HEX3Atmp_558424_839829468)) goto LA3;
i_558420_839829468 = res_558427_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_558420_839829468]);
res_558427_839829468 += ((NI) 1);
} LA3: ;
}
}
{
if (!(((NI) 0) < length0)) goto LA6;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
}
LA6: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `genIf` — codegen for if/elif/else
 * (statement or expression). Allocates a temp for the value when needed,
 * then for each branch: a 2-son branch is a condition+body (emit the test,
 * jump to an `else` label when false, emit the body, jump to `end`); a
 * 1-son branch is the `else` body. Extra brace wrapping appears in the
 * C++-target path (command == 2 / module flag bit 27). A branch with any
 * other arity is an internal error. Do not hand-edit; regenerate instead. */
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ropeobj178006* lelse0;
Ropeobj178006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, n0);
lend0 = getlabel_539217_839829468(p0);
{
NI i_545011_839829468;
NI HEX3Atmp_545435_839829468;
NI LOC9;
NI res_545438_839829468;
i_545011_839829468 = (NI)0;
HEX3Atmp_545435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059(n0);
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1));
res_545438_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11;
i_545011_839829468 = res_545438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_297440_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
(*d0).k = ((Tlockind292808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_545011_839829468];
{
NI LOC20;
TY533289 LOC23;
NI LOC24;
TY532811 LOC25;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(it0);
/* Branch with condition + body. */
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_539217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468(a0);
LOC25[1] = lelse0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj178006** LOC32;
Ropeobj178006** LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj178006**)0;
LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_544060_839829468(p0);
{
NI LOC37;
TY178507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_295351_850551059(n0);
/* With more than one branch, jump over the remaining branches. */
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
fixlabel_539230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY533289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_293081_850551059(it0);
/* Else branch: body only. */
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_544060_839829468(p0);
}
goto LA18;
LA43: ;
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_545438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_295351_850551059(n0);
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_539230_839829468(p0, lend0);
}
LA51: ;
}
/* Machine-generated C (Nim compiler backend output).
 * NOTE(review): appears to correspond to `downConv` — conversion from a base
 * object type down to a derived type. On the C++ target (command == 2 /
 * module flag bit 27) the conversion is emitted as the bare operand.
 * Otherwise the operand is unwrapped through nested conversion nodes
 * (kind 66), and a chain of `.Sup`-style accessor suffixes is appended —
 * one per level of inheritance distance — with ref-like operands (kinds
 * 22/21/23) getting an extra deref/addr treatment. Do not hand-edit. */
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype292840* dest0;
Tnode292802* arg0;
Ttype292840* src0;
Tloc292816 a0;
Ropeobj178006* r0;
NIM_BOOL isref0;
Ttype292840* LOC10;
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
/* Strip nested conversion nodes (kind 66) off the operand. */
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, arg0, (&a0));
r0 = rdloc_538188_839829468(a0);
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256));
isref0 = ((*LOC10).kind == ((Ttypekind292244) 22) || (*LOC10).kind == ((Ttypekind292244) 21) || (*LOC10).kind == ((Ttypekind292244) 23));
{
if (!isref0) goto LA13;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_558650_839829468;
NI HEX3Atmp_558677_839829468;
NI LOC17;
NI res_558680_839829468;
i_558650_839829468 = (NI)0;
HEX3Atmp_558677_839829468 = (NI)0;
LOC17 = (NI)0;
/* One suffix per inheritance level (absolute distance), starting at 2
 * because the first level was appended above. */
LOC17 = inheritancediff_326252_3876443242(dest0, src0);
HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_558680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19;
i_558650_839829468 = res_558680_839829468;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_558680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype292840* LOC28;
TY532811 LOC31;
LOC26 = (NIM_BOOL)0;
LOC26 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype292840*)0;
LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((*LOC28).kind == ((Ttypekind292244) 22) || (*LOC28).kind == ((Ttypekind292244) 21) || (*LOC28).kind == ((Ttypekind292244) 23));
LA27: ;
if (!LOC26) goto LA29;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_538188_839829468((*d0));
LOC31[1] = r0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
/* Machine-generated C emitted by the Nim compiler (mangled Nim proc is
 * presumably `upConv` in the C code generator).  Generates code for an
 * up-conversion of node `n0` into destination loc `d0`: optionally emits a
 * runtime object-conversion check, then a cast.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
/* evaluate the expression being converted (first son of n0) into a0 */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
LOC3 = (NIM_BOOL)0;
/* LOC3 := option bit 1 set (presumably objConv checks enabled) ... */
LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_533513_839829468(dest0);
/* ... and the destination object actually carries a type field */
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
/* checked path: walk through ptr/ref-like wrappers (kinds 21/22/23 —
 * presumably var/ptr/ref; confirm against Nim's TTypeKind) building the
 * dereference expression `r0` and remembering a pointer to nil-check. */
r0 = rdloc_538188_839829468(a0);
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
while (1) {
Ttype292840* LOC23;
if (!((*t0).kind == ((Ttypekind292244) 23) || (*t0).kind == ((Ttypekind292244) 21) || (*t0).kind == ((Ttypekind292244) 22))) goto LA9;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY178507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
/* gcmd == 2 is presumably the "compile to cpp" command — verify */
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
/* wrap r0 in format T839829468_124 (a deref) for the C backend */
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype292840*)0;
LOC23 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
if (!!(LOC26)) goto LA28;
{
/* C backend only: append ".Sup"-style field access (fmt T839829468_153)
 * for each non-final object in the inheritance chain (kind 17 with a base) */
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY535238 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
/* a nilable pointer was seen: emit a nil-guarded object check
 * (3-arg format T839829468_560) */
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY532811 LOC40;
/* plain object check (2-arg format T839829468_561) */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY532811 LOC45;
Ropeobj178006* LOC46;
/* non-object source (kind != 17): emit a value cast (fmt T839829468_430) */
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_538188_839829468(a0);
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY532811 LOC48;
Ropeobj178006* LOC49;
/* object source: cast through the address of a0 (fmt T839829468_429) */
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC48[1] = addrloc_538204_839829468(a0);
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
/* Machine-generated C emitted by the Nim compiler (mangled Nim proc is
 * presumably `genRangeChck`).  Emits either a plain cast or a runtime
 * range-checked conversion of `n0` into destination `d0`; `magic0` names
 * the runtime check routine to call.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, genrangechck_556590_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
TY532811 LOC8;
Ropeobj178006* LOC9;
LOC3 = (NIM_BOOL)0;
/* LOC3 := range checks disabled (option bit 3 clear) ... */
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0, 1048576);
/* ... or destination kind in [40,44] — presumably a kind family that
 * never needs a range check; confirm against Nim's TTypeKind enum */
LOC3 = ((*LOC5).kind >= ((Ttypekind292244) 40) && (*LOC5).kind <= ((Ttypekind292244) 44));
LA4: ;
if (!LOC3) goto LA6;
/* unchecked path: emit a plain cast (2-arg format T839829468_430) */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_538227_839829468(a0);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY536475 LOC11;
Ropeobj178006* LOC12;
/* checked path: emit `magic0(value, low, high)`-style conversion
 * (sons 1 and 2 of n0 are the bounds literals; 5-arg fmt T839829468_562) */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535671_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_538227_839829468(a0);
LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_178277_2381377266(magic0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
/* Machine-generated C (Nim compiler cgen; presumably `convStrToCStr`).
 * Evaluates the first son of n0, wraps it in format T839829468_485
 * (a string -> cstring conversion), and stores the result into d0. */
N_NIMCALL(void, convstrtocstr_556642_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 src;
    Ttype292840* dstTyp;
    TY178507 fmtArgs;
    Ropeobj178006* converted;
    memset((void*)(&src), 0, sizeof(src));
    initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&src));
    dstTyp = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = rdloc_538188_839829468(src);
    converted = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtArgs, 1);
    putintodest_550468_839829468(p0, d0, dstTyp, converted, src.s);
}
/* Machine-generated C (Nim compiler cgen; presumably `convCStrToStr`).
 * Evaluates the first son of n0, wraps it via module format T839829468_411
 * (a cstring -> string conversion), stores it into d0, and records GC
 * usage for the node. */
N_NIMCALL(void, convcstrtostr_556654_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    Tloc292816 src;
    Ttype292840* dstTyp;
    TY178507 fmtArgs;
    Ropeobj178006* converted;
    memset((void*)(&src), 0, sizeof(src));
    initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&src));
    dstTyp = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
    memset((void*)fmtArgs, 0, sizeof(fmtArgs));
    fmtArgs[0] = rdloc_538188_839829468(src);
    converted = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), fmtArgs, 1);
    putintodest_550468_839829468(p0, d0, dstTyp, converted, src.s);
    gcusage_554439_839829468(n0);
}
/* Machine-generated C (Nim compiler; presumably `isRoutine` on TSym).
 * True when the symbol's kind bit is set in the mask 258048 — presumably
 * the proc/func/method/iterator/converter kinds; confirm against TSymKind. */
static N_INLINE(NIM_BOOL, isroutine_297323_850551059)(Tsym292834* s0) {
    return ((258048 & (1U << ((NU)((*s0).kind) & 31U))) != 0);
}
/* Machine-generated C (Nim compiler cgen; presumably `isConstClosure`).
 * True when n0's first son is a symbol node (kind 3) whose sym is a
 * routine and n0's second son has node kind 23 (presumably nkNilLit —
 * i.e. a closure with no captured environment).  Short-circuit order is
 * preserved exactly: son 1 is only inspected when the first two tests pass. */
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) {
    Tnode292802* fn = (*n0).kindU.S6.sons->data[((NI) 0)];
    return ((*fn).kind == ((Tnodekind292020) 3)
         && isroutine_297323_850551059((*fn).kindU.S4.sym)
         && (*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23));
}
/* Machine-generated C (Nim compiler cgen; presumably `genClosure`).
 * For a constant closure, materializes it as a global constant; otherwise
 * evaluates proc and environment separately, builds the closure pair in a
 * temporary, and puts the temporary into d0.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
Ropeobj178006* tmp0;
Ropeobj178006* LOC6;
TY535238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_557810_839829468(n0);
if (!LOC3) goto LA4;
/* const-closure path: allocate a fresh label, emit a global constant
 * definition (fmt T839829468_524) into file section 8, use it directly */
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535671_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc292812) 1));
}
goto LA1;
LA4: ;
{
Tloc292816 tmp0;
Tloc292816 a0;
Tloc292816 b0;
TY535238 LOC14;
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
/* a0 := closure proc (son 0), b0 := environment (son 1) */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
/* sanity check: a node of kind 155 here is a compiler bug (T839829468_567) */
if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
/* build the pair into a fresh temp and copy that temp into d0 */
gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(tmp0);
LOC14[1] = rdloc_538188_839829468(a0);
LOC14[2] = rdloc_538188_839829468(b0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_539258_839829468(p0, d0, tmp0);
}
LA1: ;
}
/* Machine-generated C (Nim compiler cgen; presumably `assignLabel`).
 * Assigns the block a label rope built from prefix T839829468_296 plus the
 * block id (via a GC-aware ref assignment) and returns that label. */
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) {
    Ropeobj178006* idRope = rope_178401_2381377266(((NI64) ((*b0).id)));
    unsureAsgnRef((void**) (&(*b0).label),
                  HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), idRope));
    return (*b0).label;
}
/* Machine-generated C (Nim compiler cgen; presumably `genComputedGoto`).
 * Lowers a loop body containing a case statement into a computed-goto
 * dispatch table (GCC label-address style), for interpreter-like loops
 * marked with the computedGoto pragma.  Statement order is load-bearing:
 * sections are detached/re-attached from the current block to splice the
 * pre/post-case statements around each branch.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj178006* tmp0;
TY178507 LOC27;
Ropeobj178006* gotoarray0;
TY532811 LOC28;
TY178507 LOC33;
NI topblock0;
Ropeobj178006* oldbody0;
Ropeobj178006* tailb0;
Ropeobj178006* taila0;
Tnode292802* casestmt0;
Tloc292816 a_545871_839829468;
TY532811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
/* Phase 1: find the case statement (node kind 97) among n0's sons and
 * validate it: last branch must be kind 85, selector range <= 10000 and
 * zero-based; each failure reports a local error and bails out. */
{
NI i_545768_839829468;
NI HEX3Atmp_545933_839829468;
NI LOC2;
NI res_545936_839829468;
i_545768_839829468 = (NI)0;
HEX3Atmp_545933_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_545933_839829468 = (LOC2 - 1);
res_545936_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
if (!(res_545936_839829468 <= HEX3Atmp_545933_839829468)) goto LA4;
i_545768_839829468 = res_545936_839829468;
it0 = (*n0).kindU.S6.sons->data[i_545768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7;
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
LOC11 = lastson_295364_850551059(it0);
if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_545768_839829468;
asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
if (!(IL64(10000) < asize0)) goto LA16;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
LOC20 = (NI64)0;
LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_545936_839829468 += ((NI) 1);
} LA4: ;
}
}
{
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
/* Phase 2: reserve arraysize0+1 labels and emit the static array of
 * label addresses (formats T839829468_574..577) into section 0. */
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0)));
gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_545819_839829468;
NI HEX3Atmp_545941_839829468;
NI res_545944_839829468;
i_545819_839829468 = (NI)0;
HEX3Atmp_545941_839829468 = (NI)0;
HEX3Atmp_545941_839829468 = (NI)(arraysize0 - ((NI) 1));
res_545944_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC32;
if (!(res_545944_839829468 <= HEX3Atmp_545941_839829468)) goto LA31;
i_545819_839829468 = res_545944_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_545944_839829468 += ((NI) 1);
} LA31: ;
}
}
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0);
/* Phase 3: generate the statements AFTER the case (tailb0) and BEFORE
 * the case (taila0) into detached section buffers, by temporarily
 * nulling the top block's statement section. */
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545854_839829468;
NI HEX3Atmp_545949_839829468;
NI HEX3Atmp_545950_839829468;
NI LOC35;
NI res_545953_839829468;
j_545854_839829468 = (NI)0;
HEX3Atmp_545949_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)0;
HEX3Atmp_545949_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_293081_850551059(n0);
HEX3Atmp_545950_839829468 = (LOC35 - 1);
res_545953_839829468 = HEX3Atmp_545949_839829468;
{
while (1) {
if (!(res_545953_839829468 <= HEX3Atmp_545950_839829468)) goto LA37;
j_545854_839829468 = res_545953_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]);
res_545953_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545866_839829468;
NI HEX3Atmp_545958_839829468;
NI res_545961_839829468;
j_545866_839829468 = (NI)0;
HEX3Atmp_545958_839829468 = (NI)0;
HEX3Atmp_545958_839829468 = (NI)(casepos0 - ((NI) 1));
res_545961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_545961_839829468 <= HEX3Atmp_545958_839829468)) goto LA40;
j_545866_839829468 = res_545961_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]);
res_545961_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0));
/* Phase 4: emit the initial dispatch (fmt T839829468_578) on the case
 * selector expression. */
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_538188_839829468(a_545871_839829468);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
/* Phase 5: for each case branch, emit its labels (fmt T839829468_580),
 * the branch body, the spliced tail/head statements, and a re-dispatch. */
{
NI i_545894_839829468;
NI HEX3Atmp_545977_839829468;
NI LOC43;
NI res_545980_839829468;
i_545894_839829468 = (NI)0;
HEX3Atmp_545977_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_293081_850551059(casestmt0);
HEX3Atmp_545977_839829468 = (LOC43 - 1);
res_545980_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC46;
NI LOC47;
Tnode292802* it0;
Tnode292802* LOC57;
Ropeobj178006** LOC58;
Ropeobj178006** LOC59;
Tloc292816 a0;
TY532811 LOC60;
if (!(res_545980_839829468 <= HEX3Atmp_545977_839829468)) goto LA45;
i_545894_839829468 = res_545980_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468];
{
NI j_545910_839829468;
NI HEX3Atmp_545969_839829468;
NI LOC49;
NI res_545972_839829468;
j_545910_839829468 = (NI)0;
HEX3Atmp_545969_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_293081_850551059(it0);
HEX3Atmp_545969_839829468 = (NI)(LOC49 - ((NI) 2));
res_545972_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC56;
if (!(res_545972_839829468 <= HEX3Atmp_545969_839829468)) goto LA51;
j_545910_839829468 = res_545972_839829468;
{
/* range values (node kind 44) are not supported here */
if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_545972_839829468 += ((NI) 1);
} LA51: ;
}
}
LOC57 = (Tnode292802*)0;
LOC57 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC57);
LOC58 = (Ropeobj178006**)0;
LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj178006**)0;
LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_544060_839829468(p0);
res_545980_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler cgen; presumably `genWhileStmt`).
 * Generates a while loop: opens a loop block, emits the condition test
 * (unless the condition is the literal `true`), then either the computed
 * goto lowering (when the body carries the matching pragma and the C
 * compiler supports it) or plain statements, then closes the block.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, genwhilestmt_545984_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
NI oldbreakidx_546011_839829468;
TY533289 LOC1;
Tnode292802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
/* save/restore breakidx so nested loops resolve `break` correctly */
oldbreakidx_546011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj178006* label0;
TY532811 LOC8;
LOC4 = (NIM_BOOL)0;
/* skip the conditional exit when the condition is the int literal
 * (node kind 6) with value 0 — i.e. `while true` after lowering */
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
/* special word 182 is presumably the computedGoto pragma; the Cc table
 * flag (bit 1) gates on whether the C compiler supports computed goto */
LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
LOC18 = len_293081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1));
LA19: ;
if (!LOC17) goto LA20;
/* unwrap a 2-son body whose first son is empty (kind 1) */
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_545744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_539244_839829468(p0, loopbody0);
}
LA9: ;
{
TY533289 LOC27;
/* option bit 19 set: emit an extra per-iteration statement
 * (fmt T839829468_581 — presumably a GC/interrupt check point) */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546011_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Machine-generated C (Nim compiler cgen; presumably `genGotoVar`).
 * Emits a `goto` driven by a literal value: the target must be a literal
 * node (kind in [5,15]); otherwise a local error (T839829468_582) is
 * reported.  The emitted goto uses format T839829468_583 with the
 * literal's integer value.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0) {
{
if (!!(((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)))) goto LA3;
localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
}
goto LA1;
LA3: ;
{
TY178507 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266((*value0).kindU.S1.intval);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1);
}
LA1: ;
}
/* Machine-generated C (Nim compiler cgen; presumably `varInDynamicLib`).
 * Handles a variable imported from a dynamic library: loads the lib,
 * marks the symbol's loc as indirect (flag bit 0), renames its C name to
 * the dynlib mangled form, emits the dlsym-style load (fmt T839829468_584)
 * into file section 16 and the pointer variable declaration
 * (fmt T839829468_585) into section 9.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY532811 LOC3;
lib0 = (*sym0).annex;
extname0 = (*sym0).loc.r;
loaddynamiclib_559480_839829468(m0, lib0);
(*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
(*m0).labels += ((NI) 2);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC1[1] = gettypedesc_535671_839829468(m0, (*sym0).typ);
LOC1[2] = (*lib0).name;
LOC2 = (NimStringDesc*)0;
LOC2 = HEX24_178856_2381377266(extname0);
LOC1[3] = makecstring_191638_155036129(LOC2);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = (*sym0).loc.r;
LOC3[1] = gettypedesc_535671_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2);
}
/* Machine-generated C (Nim compiler cgen; presumably `assignGlobalVar`).
 * Ensures a global variable symbol has a C-level location and emits its
 * declaration: dynlib-imported vars are delegated to varindynamiclib,
 * threadvars go through declarethreadvar, ordinary globals get a `static/
 * extern/volatile`-style declaration; optionally registers debug info.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{ {
Ropeobj178006* LOC5;
/* no location yet (loc.k == 0): mangle the name and fill the loc */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3));
}
LA3: ;
{
Tcgen529027* q0;
/* loc flag bit 4: presumably "imported from a dynamic library" */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_532241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_538812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_532369_839829468((*p0).module, s0);
{
/* loc flag bit 3 set: already declared elsewhere — nothing to emit */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
/* sym flag bit 22: presumably a threadvar */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj178006* decl0;
Ropeobj178006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_535671_839829468((*p0).module, (*s0).loc.t);
{
TY178507 LOC43;
/* no .codegenDecl constraint: assemble default declaration from
 * qualifier fragments (T839829468_240/121/122) + type + name */
if (!(*s0).constraint == 0) goto LA29;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_178482_2381377266(&decl0, td0);
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
NimStringDesc* LOC45;
TY532811 LOC46;
/* user-supplied declaration template: constraint string + "$ $;" */
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0);
}
LA22: ;
{
/* a global declared inside a loop must be reset each iteration */
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_538350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY535238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
/* options mask 163840 fully set: emit debug-info registration
 * ("owner.name" normalized) via fmt T839829468_586 into section 15 */
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_191638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ);
appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler cgen; presumably
 * `genTraverseProcForGlobal`).  Builds a GC traversal procedure for a
 * global variable: creates a fresh proc name, generates the traversal
 * body over the symbol's type, assembles header + sections into a full
 * definition, and appends prototype (section 7) and body (section 10) to
 * the module.  Returns the generated proc's name rope.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* sloc0;
Ropeobj178006* header0;
TY178507 LOC8;
Ropeobj178006* generatedproc0;
TY535235 LOC9;
Ropeobj178006** LOC10;
Ropeobj178006** LOC11;
Ropeobj178006** LOC12;
TY178507 LOC13;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_533596_839829468(m0);
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
/* emulated threadvar (sym flag bit 22 + emulation mode): access the
 * variable through the thread-local accessor and prefix the loc
 * (format T839829468_288) */
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_532949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_532945_839829468(p0, s0);
sloc0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t);
/* stitch header + proc sections 0/1/2 into the full definition
 * (format T839829468_190) */
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj178006**)0;
LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC9[3] = (*LOC12);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Machine-generated C (Nim compiler cgen; presumably `registerGcRoot`).
 * If the selected GC (bitmask 240 over gselectedgc — presumably the
 * tracing collectors) is in use and the variable's type contains GC'ed
 * references, generates a traversal proc for it and emits a registration
 * call (format T839829468_589) into the module init proc.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) {
{
NIM_BOOL LOC3;
Ropeobj178006* prc0;
Ropeobj178006** LOC7;
TY178507 LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = containsgarbagecollectedref_320117_3876443242((*v0).loc.t);
LA4: ;
if (!LOC3) goto LA5;
prc0 = gentraverseprocforglobal_538032_839829468((*p0).module, v0);
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = prc0;
appcg_532632_839829468((*p0).module, LOC7, ((NimStringDesc*) &T839829468_589), LOC8, 1);
}
LA5: ;
}
/* Machine-generated C (Nim compiler cgen; presumably
 * `isAssignedImmediately`).  A node is "assigned immediately" unless it
 * is empty (node kind 1) or its type is an invalid return type for the
 * C backend (which forces an out-parameter style assignment). */
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) {
    if ((*n0).kind == ((Tnodekind292020) 1)) {
        return NIM_FALSE;
    }
    if (isinvalidreturntype_533548_839829468((*n0).typ)) {
        return NIM_FALSE;
    }
    return NIM_TRUE;
}
/* Machine-generated C (Nim compiler cgen; presumably `genAsgnCall`).
 * Dispatches code generation for an assignment whose right side is a
 * call `ri0` (with callee at son 0), into destination `le0`/`d0`:
 * closure calling convention (callconv 8) -> genclosurecall; callee sym
 * flag 27 -> geninfixcall; flag 28 -> gennamedparamcall; otherwise the
 * plain prefix call.  Always runs post-statement actions afterwards.
 * NOTE(review): generated code — fix the Nim source, not this file. */
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
{
Ttype292840* LOC3;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, 2048);
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
genclosurecall_540452_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
/* callee is a symbol node (kind 3) with sym flag bit 27 set */
LOC7 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
geninfixcall_541929_839829468(p0, le0, ri0, d0);
}
goto LA1;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
/* callee is a symbol node (kind 3) with sym flag bit 28 set */
LOC12 = ((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC12)) goto LA13;
LOC12 = (((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
gennamedparamcall_542616_839829468(p0, ri0, d0);
}
goto LA1;
LA14: ;
{
genprefixcall_539960_839829468(p0, le0, ri0, d0);
}
LA1: ;
poststmtactions_532942_839829468(p0);
}
/* Generated by the Nim compiler's C backend.
   Loads the value of ri0 into the location a0 (for the lvalue le0):
     - if ri0 is one of seven call-like node kinds (26..32) and the
       callee is either not a symbol or a symbol with magic 0 (i.e. not
       a compiler magic), generate an assigning call;
     - if ri0 is a dereference node (kind 47 or 65), generate a deref;
     - otherwise fall back to the generic expression generator.
   NOTE(review): numeric kind/magic constants correspond to Nim compiler
   enum members; mapping inferred - confirm against the Nim sources. */
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = !(((*(*ri0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)));
if (LOC5) goto LA6;
LOC5 = ((*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).magic == ((Tmagic292524) 0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
genasgncall_543695_839829468(p0, le0, ri0, a0);
}
goto LA1;
LA7: ;
{
if (!((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65))) goto LA10;
genderef_543921_839829468(p0, ri0, a0, NIM_TRUE);
}
goto LA1;
LA10: ;
{
expr_539248_839829468(p0, ri0, a0);
}
LA1: ;
}
/* Generated by the Nim compiler's C backend.
   Emits C for a single `var x = value` declaration.  a0 is the
   identDefs node: son 0 = the variable symbol, son 2 = the initializer
   (the empty node, kind 1, when absent).  Three symbol classes:
     1. symbols that never need a runtime C variable (flags mask
        1082130432); only a goto-variable (flag 30) still emits a jump;
     2. globals (flag 3): declared via assignglobalvar (possibly into
        the module's preinit proc for flag 9), object-initialized,
        optionally prototyped in the generated header (flag 6) and
        registered as a GC root;
     3. locals: with a fast path that folds an immediate initializer
        directly into the C declaration, otherwise a plain declare +
        default-init followed by loadinto.
   NOTE(review): numeric flag/kind constants and bit masks correspond to
   Nim compiler enum members; the mapping is inferred from the generated
   code - confirm against the Nim compiler sources. */
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) {
Tsym292834* v0;
Tcproc529021* targetproc0;
{ v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7;
gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
}
LA7: ;
goto BeforeRet;
}
LA3: ;
targetproc0 = p0;
{
/* flag 3: global variable */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11;
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
/* already-declared global with no initializer: nothing to emit */
LOC16 = (((*v0).flags & 96) == 32);
if (!(LOC16)) goto LA17;
LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*v0).loc.flags & 72) == 0));
LA18: ;
if (!LOC15) goto LA19;
goto BeforeRet;
}
LA19: ;
{
/* flag 9: initialize this global inside the module preinit proc */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23;
targetproc0 = (*(*p0).module).preinitproc;
}
LA23: ;
assignglobalvar_538819_839829468(targetproc0, v0);
genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
/* flag 6 + an open generated header: export a prototype */
LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC27)) goto LA28;
LOC27 = !((generatedheader_532201_839829468 == NIM_NIL));
LA28: ;
if (!LOC27) goto LA29;
genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0);
}
LA29: ;
registergcroot_543762_839829468(p0, v0);
}
goto LA9;
LA11: ;
{
/* local variable */
Tnode292802* value0;
NIM_BOOL imm0;
value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
imm0 = isassignedimmediately_543781_839829468(value0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC35;
NIM_BOOL LOC36;
NIM_BOOL LOC38;
NIM_BOOL LOC42;
Ropeobj178006* decl0;
Tloc292816 tmp0;
/* fast path: initializer folded into the C declaration itself;
   requires an immediate value, compatible codegen mode, no pending
   split declarations and a type without hidden pointers */
LOC34 = (NIM_BOOL)0;
LOC35 = (NIM_BOOL)0;
LOC36 = (NIM_BOOL)0;
LOC36 = imm0;
if (!(LOC36)) goto LA37;
LOC38 = (NIM_BOOL)0;
LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC38) goto LA39;
LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA39: ;
LOC36 = LOC38;
LA37: ;
LOC35 = LOC36;
if (!(LOC35)) goto LA40;
LOC35 = ((*p0).splitdecls == ((NI) 0));
LA40: ;
LOC34 = LOC35;
if (!(LOC34)) goto LA41;
LOC42 = (NIM_BOOL)0;
LOC42 = containshiddenpointer_320120_3876443242((*v0).typ);
LOC34 = !(LOC42);
LA41: ;
if (!LOC34) goto LA43;
genlinedir_532823_839829468(p0, a0);
decl0 = localvardecl_538532_839829468(p0, v0);
memset((void*)(&tmp0), 0, sizeof(tmp0));
{
NIM_BOOL LOC47;
NIM_BOOL LOC48;
Tnode292802* LOC50;
Tnode292802* LOC52;
Ropeobj178006* params0;
Ttype292840* typ0;
TY532811 LOC66;
/* initializer is a call (kinds 26..32) to a symbol with flag 24:
   emit `decl = callee(args)` directly, joining the argument ropes */
LOC47 = (NIM_BOOL)0;
LOC48 = (NIM_BOOL)0;
LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32));
if (!(LOC48)) goto LA49;
LOC50 = (Tnode292802*)0;
LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3));
LA49: ;
LOC47 = LOC48;
if (!(LOC47)) goto LA51;
LOC52 = (Tnode292802*)0;
LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0);
LA51: ;
if (!LOC47) goto LA53;
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NI i_544619_839829468;
NI HEX3Atmp_544825_839829468;
NI LOC56;
NI res_544828_839829468;
i_544619_839829468 = (NI)0;
HEX3Atmp_544825_839829468 = (NI)0;
LOC56 = (NI)0;
LOC56 = len_293081_850551059(value0);
HEX3Atmp_544825_839829468 = (LOC56 - 1);
res_544828_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC65;
if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58;
i_544619_839829468 = res_544828_839829468;
{
TY533289 LOC63;
Ropeobj178006* LOC64;
if (!!((params0 == NIM_NIL))) goto LA61;
memset((void*)LOC63, 0, sizeof(LOC63));
LOC64 = (Ropeobj178006*)0;
LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
add_178482_2381377266(&params0, LOC64);
}
LA61: ;
LOC65 = (Ropeobj178006*)0;
LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0);
add_178482_2381377266(&params0, LOC65);
res_544828_839829468 += ((NI) 1);
} LA58: ;
}
}
memset((void*)LOC66, 0, sizeof(LOC66));
LOC66[0] = decl0;
LOC66[1] = params0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
}
goto LA45;
LA53: ;
{
/* plain initializer: evaluate once and emit `decl = value;` */
TY532811 LOC68;
initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0));
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = decl0;
LOC68[1] = rdloc_538188_839829468(tmp0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
}
LA45: ;
goto BeforeRet;
}
LA43: ;
assignlocalvar_538614_839829468(p0, v0);
initlocalvar_538398_839829468(p0, v0, imm0);
}
LA9: ;
{
/* shared tail: when an initializer exists, load it into the
   variable's location (skipped by the fast path above) */
if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71;
genlinedir_532823_839829468(targetproc0, a0);
loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
}
LA71: ;
}BeforeRet: ;
}
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) {
/* Variable declaration whose name is not a plain symbol (e.g. a closure
   environment field): when an initializer is present (son 2 of a0 is
   not the empty node, kind 1), evaluate the lvalue expression and load
   the initializer into it.  Without an initializer nothing is emitted. */
Tnode292802* init0 = (*a0).kindU.S6.sons->data[((NI) 2)];
if (!((*init0).kind == ((Tnodekind292020) 1))) {
Tloc292816 dest0;
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&dest0));
genlinedir_532823_839829468(p0, a0);
loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&dest0));
}
}
/* Generated by the Nim compiler's C backend.
   Emits C for a tuple-unpacking var statement `var (a, b, ...) = tup`.
   n0 must be a varTuple node (kind 36, internal error otherwise); its
   last son is the tuple expression and the first L-2 sons are the
   target names.  If any target is not a plain symbol (kind 3) the
   statement is rewritten by lowertupleunpacking and generated as
   ordinary statements.  Otherwise the tuple is evaluated once and each
   variable (global or local) is declared and assigned its field,
   addressed either positionally (`.Field<i>` for type kind 18) or by
   the mangled record field name.
   NOTE(review): numeric kind/flag constants correspond to Nim compiler
   enum members; mapping inferred - confirm against the Nim sources. */
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 tup0;
Tloc292816 field0;
NI L0;
NIM_BOOL uselowering0;
Ttype292840* t0;
{ memset((void*)(&tup0), 0, sizeof(tup0));
memset((void*)(&field0), 0, sizeof(field0));
{
if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA3: ;
L0 = sonslen_295351_850551059(n0);
uselowering0 = NIM_FALSE;
{
/* scan the targets: lowering is needed if any is not a symbol */
NI i_543822_839829468;
NI HEX3Atmp_543905_839829468;
NI res_543908_839829468;
i_543822_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3));
res_543908_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7;
i_543822_839829468 = res_543908_839829468;
{
Tnode292802* LOC10;
LOC10 = (Tnode292802*)0;
LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468);
if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11;
uselowering0 = NIM_TRUE;
goto LA5;
}
LA11: ;
res_543908_839829468 += ((NI) 1);
} LA7: ;
}
} LA5: ;
{
Tnode292802* LOC17;
if (!uselowering0) goto LA15;
LOC17 = (Tnode292802*)0;
LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc);
genstmts_539244_839829468(p0, LOC17);
goto BeforeRet;
}
LA15: ;
genlinedir_532823_839829468(p0, n0);
/* evaluate the tuple expression (last son) exactly once */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
t0 = getuniquetype_528640_2036603609(tup0.t);
{
NI i_543846_839829468;
NI HEX3Atmp_543914_839829468;
NI res_543917_839829468;
i_543846_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3));
res_543917_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20;
i_543846_839829468 = res_543917_839829468;
{
Tsym292834* v0;
v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym;
{
/* flag 23: symbol needs no runtime variable; skip it */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24;
goto LA21;
}
LA24: ;
{
/* flag 3: global target */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28;
assignglobalvar_538819_839829468(p0, v0);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (*v0).loc, NIM_TRUE);
registergcroot_543762_839829468(p0, v0);
}
goto LA26;
LA28: ;
{
Tnode292802* LOC31;
NIM_BOOL LOC32;
assignlocalvar_538614_839829468(p0, v0);
LOC31 = (Tnode292802*)0;
LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1)));
LOC32 = (NIM_BOOL)0;
LOC32 = isassignedimmediately_543781_839829468(LOC31);
initlocalvar_538398_839829468(p0, v0, LOC32);
}
LA26: ;
initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s);
{
TY532811 LOC37;
/* type kind 18: positional tuple - address fields as Field<i> */
if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(tup0);
LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468)));
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
}
goto LA33;
LA35: ;
{
TY532811 LOC43;
{
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = rdloc_538188_839829468(tup0);
LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0);
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
}
LA33: ;
putlocintodest_539258_839829468(p0, (&(*v0).loc), field0);
} LA21: ;
res_543917_839829468 += ((NI) 1);
} LA20: ;
}
}
}BeforeRet: ;
}
/* Generated by the Nim compiler's C backend.
   Emits C for a var section: iterates the children of n0 and dispatches
   per entry: kind 125 (comment-like node) is skipped; kind 35
   (identDefs) goes to gensinglevar when the name is a plain symbol
   (kind 3), otherwise to genclosurevar; anything else is treated as a
   tuple unpacking (genvartuple).
   NOTE(review): numeric kind constants correspond to Nim compiler enum
   members; mapping inferred - confirm against the Nim sources. */
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) {
{
NI i_544869_839829468;
NI HEX3Atmp_544902_839829468;
NI LOC2;
NI res_544905_839829468;
i_544869_839829468 = (NI)0;
HEX3Atmp_544902_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_544902_839829468 = (NI)(LOC2 - ((NI) 1));
res_544905_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544905_839829468 <= HEX3Atmp_544902_839829468)) goto LA4;
i_544869_839829468 = res_544905_839829468;
{
Tnode292802* a0;
a0 = (*n0).kindU.S6.sons->data[i_544869_839829468];
{
if (!((*a0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
if (!((*a0).kind == ((Tnodekind292020) 35))) goto LA12;
{
if (!((*(*a0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3))) goto LA16;
gensinglevar_544276_839829468(p0, a0);
}
goto LA14;
LA16: ;
{
genclosurevar_544832_839829468(p0, a0);
}
LA14: ;
}
goto LA10;
LA12: ;
{
genvartuple_543794_839829468(p0, a0);
}
LA10: ;
} LA5: ;
res_544905_839829468 += ((NI) 1);
} LA4: ;
}
}
}
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) {
/* A symbol is emitted lazily when global option bit 2 is set, or,
   failing that, when flag 25 is set on the symbol's owning module.
   The module lookup is only performed when the first check fails
   (preserving the original short-circuit).
   NOTE(review): option/flag numbers come from Nim compiler enums; the
   exact meaning is inferred - confirm against the compiler sources. */
NIM_BOOL lazy0;
lazy0 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0);
if (!lazy0) {
Tsym292834* owner0;
owner0 = getmodule_299123_2984716966(s0);
lazy0 = (((*owner0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0);
}
return lazy0;
}
/* Generated by the Nim compiler's C backend.
   Emits C for a const section: per entry, comment-like nodes (kind 125)
   are skipped, any non-constDef node (kind != 102) is an internal
   error, constants of compile-time-only types are skipped, and
   constants of complex type kinds (4, 16, 19, 18, 24) that are not yet
   declared (loc flag 3 clear) and have a nonempty AST get their C
   definition requested - unless the symbol is emitted lazily on demand.
   NOTE(review): numeric kind/flag constants correspond to Nim compiler
   enum members; mapping inferred - confirm against the Nim sources. */
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
NI i_544924_839829468;
NI HEX3Atmp_544975_839829468;
NI LOC2;
NI res_544978_839829468;
i_544924_839829468 = (NI)0;
HEX3Atmp_544975_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1));
res_544978_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4;
i_544924_839829468 = res_544978_839829468;
{
Tnode292802* it0;
Tsym292834* c0;
it0 = (*t0).kindU.S6.sons->data[i_544924_839829468];
{
if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
}
LA12: ;
c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC16;
LOC16 = (NIM_BOOL)0;
LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ);
if (!LOC16) goto LA17;
goto LA5;
}
goto LA14;
LA17: ;
{
NIM_BOOL LOC20;
NIM_BOOL LOC21;
NI LOC24;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = ((*(*c0).typ).kind == ((Ttypekind292244) 4) || (*(*c0).typ).kind == ((Ttypekind292244) 16) || (*(*c0).typ).kind == ((Ttypekind292244) 19) || (*(*c0).typ).kind == ((Ttypekind292244) 18) || (*(*c0).typ).kind == ((Ttypekind292244) 24));
if (!(LOC21)) goto LA22;
LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC24 = (NI)0;
LOC24 = len_293081_850551059((*c0).ast);
LOC20 = !((LOC24 == ((NI) 0)));
LA23: ;
if (!LOC20) goto LA25;
{
NIM_BOOL LOC29;
LOC29 = (NIM_BOOL)0;
LOC29 = emitlazily_532248_839829468(c0);
if (!!(LOC29)) goto LA30;
requestconstimpl_539240_839829468(p0, c0);
}
LA30: ;
}
goto LA14;
LA25: ;
LA14: ;
} LA5: ;
res_544978_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Generated by the Nim compiler's C backend.
   First pass of the hash-based string `case`: for every string literal
   of branch b0 (all sons except the last, which is the branch body),
   compute its hash, reduce it to a bucket index
   j = hash & (branches0Len0 - 1) - branches0Len0 is a power of two -
   and append to branches0[j] a C snippet comparing the selector e0
   against the literal and jumping to labl0 on equality. */
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) {
Tloc292816 x0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
length0 = sonslen_295351_850551059(b0);
{
NI i_547122_839829468;
NI HEX3Atmp_547409_839829468;
NI res_547412_839829468;
i_547122_839829468 = (NI)0;
HEX3Atmp_547409_839829468 = (NI)0;
HEX3Atmp_547409_839829468 = (NI)(length0 - ((NI) 2));
res_547412_839829468 = ((NI) 0);
{
while (1) {
NI j0;
NI64 LOC4;
TY535238 LOC5;
if (!(res_547412_839829468 <= HEX3Atmp_547409_839829468)) goto LA3;
i_547122_839829468 = res_547412_839829468;
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_547122_839829468], (&x0));
LOC4 = (NI64)0;
LOC4 = hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[i_547122_839829468]).kindU.S3.strval);
/* bucket index: hash masked to the power-of-two table size */
j0 = ((NI) ((NI64)(LOC4 & ((NI64) ((branches0Len0-1))))));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(e0);
LOC5[1] = rdloc_538188_839829468(x0);
LOC5[2] = labl0;
appcg_532632_839829468((*p0).module, &branches0[j0], ((NimStringDesc*) &T839829468_595), LOC5, 3);
res_547412_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
/* Emit expression n0 inside its own C scope: open an unnamed block,
   generate the expression into destination d0, then close the block.
   The block id returned by startblock is not needed here. */
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
(void) startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), noargs0, 0);
expr_539248_839829468(p0, n0, d0);
endblock_544060_839829468(p0);
}
/* Generated by the Nim compiler's C backend.
   Second pass of an if-chain `case`: for branches 1..until0 emit the
   numbered label `LA<labid0+i>:` followed by the branch body.  An
   of-branch (kind 85) runs its last son and then jumps to the shared
   end label; an else-branch runs its only son and falls through.
   Returns the end label so the caller can fix it up. */
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) {
Ropeobj178006* result0;
Ropeobj178006* lend0;
result0 = (Ropeobj178006*)0;
lend0 = getlabel_539217_839829468(p0);
{
NI i_546984_839829468;
NI res_547017_839829468;
i_546984_839829468 = (NI)0;
res_547017_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC10;
if (!(res_547017_839829468 <= until0)) goto LA3;
i_546984_839829468 = res_547017_839829468;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
/* an uninitialized destination for an empty case type becomes "none" */
LOC6 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC6)) goto LA7;
LOC6 = isemptytype_297440_850551059((*t0).typ);
LA7: ;
if (!LOC6) goto LA8;
(*d0).k = ((Tlockind292808) 0);
}
LA8: ;
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468))));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
{
NI length0;
TY178507 LOC15;
if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13;
length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]);
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
}
goto LA11;
LA13: ;
{
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
}
LA11: ;
res_547017_839829468 += ((NI) 1);
} LA3: ;
}
}
result0 = lend0;
return result0;
}
/* Generated by the Nim compiler's C backend.
   Emits the comparison chain for one `case` branch b0 in the if-chain
   strategy: every label (all sons except the last) generates either a
   range test (node kind 44: compare e0 against low/high using
   rangeformat0) or a plain equality test (eqformat0), each jumping to
   labl0 when it matches. */
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816 e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) {
Tloc292816 x0;
Tloc292816 y0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
length0 = sonslen_295351_850551059(b0);
{
NI i_546932_839829468;
NI HEX3Atmp_546958_839829468;
NI res_546961_839829468;
i_546932_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2));
res_546961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3;
i_546932_839829468 = res_546961_839829468;
{
TY535235 LOC8;
/* kind 44: range label `a..b` -> low/high comparison */
if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6;
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdcharloc_538227_839829468(e0);
LOC8[1] = rdcharloc_538227_839829468(x0);
LOC8[2] = rdcharloc_538227_839829468(y0);
LOC8[3] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4);
}
goto LA4;
LA6: ;
{
TY535238 LOC10;
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdcharloc_538227_839829468(e0);
LOC10[1] = rdcharloc_538227_839829468(x0);
LOC10[2] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3);
}
LA4: ;
res_546961_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Generated by the Nim compiler's C backend.
   First pass of the if-chain `case` strategy for branches 1..until0:
   each of-branch (kind 85) emits its comparison chain jumping to a
   fresh numbered label, while an else-branch emits an unconditional
   goto to its label.  If branches remain beyond until0 (the caller
   will emit them differently, e.g. as a C switch), the second pass is
   bracketed by a goto around it so fall-through skips the bodies.
   Returns the shared end label produced by the second pass. */
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816 a0) {
Ropeobj178006* result0;
NI labid0;
result0 = (Ropeobj178006*)0;
labid0 = (*p0).labels;
{
NI i_547042_839829468;
NI res_547083_839829468;
i_547042_839829468 = (NI)0;
res_547083_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547083_839829468 <= until0)) goto LA3;
i_547042_839829468 = res_547083_839829468;
/* one fresh label per branch; bodies are emitted in the second pass */
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9);
}
goto LA4;
LA6: ;
{
TY178507 LOC11;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
}
LA4: ;
res_547083_839829468 += ((NI) 1);
} LA3: ;
}
}
{
NI LOC14;
NI gototarget0;
TY178507 LOC17;
TY178507 LOC18;
LOC14 = (NI)0;
LOC14 = len_293081_850551059(t0);
if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
/* more branches follow: jump over the emitted bodies */
(*p0).labels += ((NI) 1);
gototarget0 = (*p0).labels;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
}
goto LA12;
LA15: ;
{
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
}
LA12: ;
return result0;
}
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
/* Generate a `case` as a pure if-chain: evaluate the selector (son 0)
   once, run the full first pass over every branch with the supplied
   comparison formats, then fix up the shared end label. */
Tloc292816 sel0;
Ropeobj178006* endlabel0;
NI nsons0;
memset((void*)(&sel0), 0, sizeof(sel0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&sel0));
nsons0 = sonslen_295351_850551059(t0);
endlabel0 = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(nsons0 - ((NI) 1)), sel0);
fixlabel_539230_839829468(p0, endlabel0);
}
/* Generated by the Nim compiler's C backend.
   Emits C for a `case` over strings.  First counts the string literals
   across all of-branches (kind 85).  With more than 8 literals a
   hash-based strategy is used: allocate a power-of-two bucket table,
   fill it via gencasestringbranch, emit `switch (hash(sel) & bitmask)`
   with one case per non-empty bucket, a default jump when the last
   branch is an of-branch (i.e. there is no else), and finally the
   shared second pass for the branch bodies.  With 8 or fewer literals
   it falls back to a plain if-chain of string comparisons. */
N_NIMCALL(void, genstringcase_547416_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NI strings0;
strings0 = ((NI) 0);
{
/* count string literals over all of-branches */
NI i_547434_839829468;
NI HEX3Atmp_547549_839829468;
NI LOC2;
NI res_547552_839829468;
i_547434_839829468 = (NI)0;
HEX3Atmp_547549_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_547549_839829468 = (NI)(LOC2 - ((NI) 1));
res_547552_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547552_839829468 <= HEX3Atmp_547549_839829468)) goto LA4;
i_547434_839829468 = res_547552_839829468;
{
NI LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547434_839829468]).kind == ((Tnodekind292020) 85))) goto LA7;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547434_839829468]);
strings0 += (NI)(LOC9 - ((NI) 1));
}
LA7: ;
res_547552_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NI bitmask0;
NI LOC14;
TY191350* branches0;
Tloc292816 a0;
NI labid0;
TY532811 LOC26;
TY533289 LOC35;
Ropeobj178006* lend0;
NI LOC42;
if (!(((NI) 8) < strings0)) goto LA12;
/* hash strategy: bucket count = next power of two >= strings0 */
LOC14 = (NI)0;
LOC14 = nextpoweroftwo_101629_1009420244(strings0);
bitmask0 = (NI)(LOC14 - ((NI) 1));
branches0 = (TY191350*)0;
branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
labid0 = (*p0).labels;
{
NI i_547483_839829468;
NI HEX3Atmp_547559_839829468;
NI LOC16;
NI res_547562_839829468;
i_547483_839829468 = (NI)0;
HEX3Atmp_547559_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(t0);
HEX3Atmp_547559_839829468 = (NI)(LOC16 - ((NI) 1));
res_547562_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547562_839829468 <= HEX3Atmp_547559_839829468)) goto LA18;
i_547483_839829468 = res_547562_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
if (!((*(*t0).kindU.S6.sons->data[i_547483_839829468]).kind == ((Tnodekind292020) 85))) goto LA21;
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547483_839829468], a0, LOC24, branches0->data, branches0->Sup.len);
}
goto LA19;
LA21: ;
{
/* else-branch: no comparisons to bucket; label is still reserved */
}
LA19: ;
res_547562_839829468 += ((NI) 1);
} LA18: ;
}
}
/* switch on the masked hash of the selector */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468(a0);
LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
{
NI j_547517_839829468;
NI HEX3Atmp_547567_839829468;
NI res_547570_839829468;
j_547517_839829468 = (NI)0;
HEX3Atmp_547567_839829468 = (NI)0;
HEX3Atmp_547567_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
res_547570_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547570_839829468 <= HEX3Atmp_547567_839829468)) goto LA29;
j_547517_839829468 = res_547570_839829468;
{
TY532811 LOC34;
if (!!((branches0->data[j_547517_839829468] == NIM_NIL))) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = intliteral_539270_839829468(((NI64) (j_547517_839829468)));
LOC34[1] = branches0->data[j_547517_839829468];
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
}
LA32: ;
res_547570_839829468 += ((NI) 1);
} LA29: ;
}
}
memset((void*)LOC35, 0, sizeof(LOC35));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
{
NI LOC38;
TY178507 LOC41;
LOC38 = (NI)0;
LOC38 = sonslen_295351_850551059(t0);
/* no else-branch: on a hash miss jump to the last reserved label */
if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
}
LA39: ;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059(t0);
lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
fixlabel_539230_839829468(p0, lend0);
}
goto LA10;
LA12: ;
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
}
LA10: ;
}
/* Generated by the Nim compiler's C backend.
   Emits C for a `case` whose selector is a goto-variable (computed-goto
   dispatch): each branch becomes its own block; every ordinal label
   value emits a case-label line, and range labels (kind 44) are
   rejected with a local error (they are not supported here). */
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) {
{ {
NI i_545695_839829468;
NI HEX3Atmp_545737_839829468;
NI LOC2;
NI res_545740_839829468;
i_545695_839829468 = (NI)0;
HEX3Atmp_545737_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(casestmt0);
HEX3Atmp_545737_839829468 = (LOC2 - 1);
res_545740_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC5;
NI LOC6;
Tnode292802* it0;
Tnode292802* LOC16;
if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4;
i_545695_839829468 = res_545740_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NI)0;
LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468];
{
NI j_545711_839829468;
NI HEX3Atmp_545730_839829468;
NI LOC8;
NI res_545733_839829468;
j_545711_839829468 = (NI)0;
HEX3Atmp_545730_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(it0);
HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2));
res_545733_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC15;
if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10;
j_545711_839829468 = res_545733_839829468;
{
/* range labels (kind 44) are unsupported for goto dispatch */
if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA13: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rope_178401_2381377266(val0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
res_545733_839829468 += ((NI) 1);
} LA10: ;
}
}
LOC16 = (Tnode292802*)0;
LOC16 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC16);
endblock_544060_839829468(p0);
res_545740_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
}
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547575_839829468)(Tnode292802* b0) {
/* A branch is "too big" for a C switch when any of its range labels
   (node kind 44) spans more than 256 values (high - low > 256);
   the branch's last son (its body) is not inspected. */
NI last0 = (NI)(sonslen_295351_850551059(b0) - ((NI) 2));
NI idx0;
for (idx0 = ((NI) 0); idx0 <= last0; idx0++) {
Tnode292802* lbl0 = (*b0).kindU.S6.sons->data[idx0];
if ((*lbl0).kind == ((Tnodekind292020) 44)) {
NI64 span0 = (NI64)((*(*lbl0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*lbl0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval);
if (IL64(256) < span0) {
return NIM_TRUE;
}
}
}
return NIM_FALSE;
}
/* Generated by the Nim compiler's C backend.
   Determines the index of the last `case` branch that must be emitted
   as an if-chain rather than inside the C switch: a branch whose body
   contains a certain pragma (specialword 181), or - when the target C
   compiler lacks the range-case extension (Cc property bit 0) - an
   of-branch (kind 85) containing a range wider than 256 values.
   Returns 0 when the whole case can be a plain switch.
   NOTE(review): numeric property/pragma constants correspond to Nim
   compiler enums; mapping inferred - confirm against the Nim sources. */
N_NIMCALL(NI, ifswitchsplitpoint_547615_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
NI i_547630_839829468;
NI HEX3Atmp_547654_839829468;
NI LOC2;
NI res_547657_839829468;
i_547630_839829468 = (NI)0;
HEX3Atmp_547654_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_547654_839829468 = (NI)(LOC2 - ((NI) 1));
res_547657_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* stmtblock0;
if (!(res_547657_839829468 <= HEX3Atmp_547654_839829468)) goto LA4;
i_547630_839829468 = res_547657_839829468;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547630_839829468);
stmtblock0 = lastson_295364_850551059(branch0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181));
if (!LOC7) goto LA8;
result0 = i_547630_839829468;
}
goto LA5;
LA8: ;
{
if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*branch0).kind == ((Tnodekind292020) 85));
if (!(LOC15)) goto LA16;
LOC15 = branchhastoobigrange_547575_839829468(branch0);
LA16: ;
if (!LOC15) goto LA17;
result0 = i_547630_839829468;
}
LA17: ;
}
goto LA5;
LA11: ;
LA5: ;
res_547657_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/* Generated by the Nim compiler's C backend.
   Emits C for a `case` over an ordinal type as a hybrid: branches up to
   the computed split point are generated as an if-chain (they contain
   pragmas or ranges the C compiler cannot switch on), the remaining
   branches become a real C `switch` with case labels, per-branch
   blocks, and - when the C compiler supports it (Cc property bit 3)
   and no else-branch exists - an "unreachable default" hint. */
N_NIMCALL(void, genordinalcase_547724_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI splitpoint0;
Tloc292816 a0;
Ropeobj178006* lend0;
splitpoint0 = ifswitchsplitpoint_547615_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
/* if-chain part for branches 1..splitpoint */
if (!(((NI) 0) < splitpoint0)) goto LA3;
lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, a0);
}
goto LA1;
LA3: ;
{
lend0 = NIM_NIL;
}
LA1: ;
{
NI LOC8;
TY178507 LOC11;
NIM_BOOL hasdefault0;
TY533289 LOC37;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
/* switch part for the branches after the split point */
if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdcharloc_538227_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
hasdefault0 = NIM_FALSE;
{
NI i_547757_839829468;
NI HEX3Atmp_547816_839829468;
NI HEX3Atmp_547817_839829468;
NI LOC13;
NI res_547820_839829468;
i_547757_839829468 = (NI)0;
HEX3Atmp_547816_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)0;
HEX3Atmp_547816_839829468 = (NI)(splitpoint0 + ((NI) 1));
LOC13 = (NI)0;
LOC13 = len_293081_850551059(n0);
HEX3Atmp_547817_839829468 = (LOC13 - 1);
res_547820_839829468 = HEX3Atmp_547816_839829468;
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC28;
TY533289 LOC29;
if (!(res_547820_839829468 <= HEX3Atmp_547817_839829468)) goto LA15;
i_547757_839829468 = res_547820_839829468;
{
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC18)) goto LA19;
LOC18 = isemptytype_297440_850551059((*n0).typ);
LA19: ;
if (!LOC18) goto LA20;
(*d0).k = ((Tlockind292808) 0);
}
LA20: ;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547757_839829468);
{
/* of-branch (85): emit its case labels; otherwise `default:` */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24;
gencaserange_537028_839829468(p0, branch0);
}
goto LA22;
LA24: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
hasdefault0 = NIM_TRUE;
}
LA22: ;
LOC28 = (Tnode292802*)0;
LOC28 = lastson_295364_850551059(branch0);
exprblock_544103_839829468(p0, LOC28, d0);
memset((void*)LOC29, 0, sizeof(LOC29));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
res_547820_839829468 += ((NI) 1);
} LA15: ;
}
}
{
NIM_BOOL LOC32;
TY533289 LOC36;
LOC32 = (NIM_BOOL)0;
LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0);
if (!(LOC32)) goto LA33;
LOC32 = !(hasdefault0);
LA33: ;
if (!LOC32) goto LA34;
memset((void*)LOC36, 0, sizeof(LOC36));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
}
LA34: ;
memset((void*)LOC37, 0, sizeof(LOC37));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
}
LA9: ;
{
if (!!((lend0 == NIM_NIL))) goto LA40;
fixlabel_539230_839829468(p0, lend0);
}
LA40: ;
}
/* Generated by the Nim compiler's C backend.
   Top-level `case` dispatcher.  When the case yields a value and the
   destination is still uninitialized, a temp loc is allocated first.
   The strategy is chosen from the selector's (son 0's) skipped type:
     - type kind 28 (string)   -> hash/if-chain string case
     - type kinds 36..39       -> generic if-chain comparisons
     - otherwise: a goto-variable selector (symbol with flag 30) uses
       computed-goto dispatch, everything else the ordinal switch/if
       hybrid.
   NOTE(review): numeric kind/flag constants correspond to Nim compiler
   enum members; mapping inferred - confirm against the Nim sources. */
N_NIMCALL(void, gencase_547826_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ttype292840* LOC8;
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
switch ((*LOC8).kind) {
case ((Ttypekind292244) 28):
{
genstringcase_547416_839829468(p0, t0, d0);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
}
break;
default:
{
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC14)) goto LA15;
LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA15: ;
if (!LOC14) goto LA16;
gengotoforcase_545673_839829468(p0, t0);
}
goto LA12;
LA16: ;
{
genordinalcase_547724_839829468(p0, t0, d0);
}
LA12: ;
}
break;
}
}
/* Pop and return the last element of a Nim seq of node pointers, shrinking
 * the seq by one via setLengthSeq.  Mirrors Nim's system `pop`. */
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) {
	NI last = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	Tnode292802* top = (*s0)->data[last];
	(*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (last)));
	return top;
}
/* blockleaveactions: emit the cleanup code that must run when control leaves
 * `howmanytrys0` enclosing `try` statements and `howmanyexcepts0` enclosing
 * `except` blocks (used by return/break/raise codegen).  It pops each nested
 * try statement, emits its `finally` section (node kind 107), and finally
 * restores the nestedtrystmts stack to its previous state.  The fmt-string
 * constants emitted (T839829468_605 / _606) presumably expand to runtime
 * safe-point / current-exception popping calls — confirm against the string
 * table. */
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq292796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq292796*)0;
stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
/* Pass 1: for each enclosing try (1..howmanytrys0), pop it off the stack,
 * optionally emit a safe-point pop, and inline its finally statements. */
{
NI i_545471_839829468;
NI res_545596_839829468;
i_545471_839829468 = (NI)0;
res_545596_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* trystmt0;
Tnode292802* finallystmt0;
if (!(res_545596_839829468 <= howmanytrys0)) goto LA3;
i_545471_839829468 = res_545596_839829468;
/* Skip emitting the pop when compiling in the mode selected by gcmd == 2
 * or when module symbol flag 27 is set (exact semantics not visible here). */
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* Inside an except block the runtime already popped one level. */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
trystmt0 = pop_318246_1689653243((&(*p0).nestedtrystmts));
/* Remember the popped try statement so it can be pushed back afterwards. */
stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_295364_850551059(trystmt0);
{
/* Only try statements whose last son is a finally clause (kind 107)
 * contribute code here. */
if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18;
genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_545596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* Pass 2: push the saved try statements back in reverse order, restoring
 * p0.nestedtrystmts exactly as it was before pass 1. */
{
NI i_545546_839829468;
NI HEX3Atmp_545601_839829468;
NI res_545604_839829468;
i_545546_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_545604_839829468 = HEX3Atmp_545601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_545604_839829468)) goto LA22;
i_545546_839829468 = res_545604_839829468;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_545604_839829468 -= ((NI) 1);
} LA22: ;
}
}
/* Pass 3: unless suppressed by the same gcmd/flag-27 condition as above,
 * emit one T839829468_606 fmt line per enclosing except block. */
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_545587_839829468;
NI HEX3Atmp_545610_839829468;
NI res_545613_839829468;
i_545587_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_545613_839829468 = HEX3Atmp_545610_839829468;
{
while (1) {
TY533289 LOC32;
if (!(((NI) 0) <= res_545613_839829468)) goto LA31;
i_545587_839829468 = res_545613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_545613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/* genreturnstmt: generate C code for a Nim `return` statement.
 * Evaluates the optional return expression, runs all pending finally/leave
 * actions, pops the innermost finally safe point if one is active, and then
 * emits the jump to the proc epilogue (fmt string T839829468_608 — presumably
 * `goto BeforeRet`-style code; confirm against the string table). */
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) {
TY533289 LOC14;
{ {
/* Node flag 14 set -> nothing to emit for this return; bail out early. */
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_532823_839829468(p0, t0);
{
/* Generate the return value assignment unless sons[0] is empty (kind 1). */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
/* Run finalizers for every enclosing try/except we are jumping out of. */
blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
Ropeobj178006* safepoint0;
TY178507 LOC13;
/* If we are returning from inside a finally section, emit code referencing
 * the top finally safe point (fmt T839829468_607). */
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/* genbreakstmt: generate C code for a Nim `break` statement.
 * A labelled break (sons[0] is a symbol) resolves the target block index from
 * the symbol's position; an unlabelled break searches outward from
 * p0.breakidx for the nearest enclosing loop block.  After running the leave
 * actions for any try/except statements being exited, it emits a goto to the
 * target block's label (fmt string T839829468_556). */
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI idx0;
Ropeobj178006* label0;
TY178507 LOC16;
idx0 = (*p0).breakidx;
{
Tsym292834* sym0;
/* Labelled break: sons[0] is not empty (kind != 1), so it names a block. */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
/* Unlabelled break: walk outward until a loop block is found. */
{
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
/* No enclosing loop: this is a compiler bug in the caller. */
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0]));
/* Emit finally/safe-point cleanup for the trys/excepts we are leaving:
 * the difference between the current nesting depth and the depth recorded
 * when the target block was opened. */
blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/* Decide whether an assignment writes to an object-variant discriminator
 * field (symbol flag 18 set) and therefore needs a runtime discriminant
 * check.  Only considered when option bit 2 (presumably field checks —
 * confirm against the options enum) is enabled on the proc context.
 * Handles both a checked field access wrapper (node kind 46) and a plain
 * dot expression (node kind 45); anything else yields false. */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) {
	NIM_BOOL result0 = (NIM_BOOL)0;
	if ((((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) {
		Tnode292802* le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)];
		if ((*le0).kind == ((Tnodekind292020) 46)) {
			/* checked access: the dot expression is one level deeper */
			Tsym292834* field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
			result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
		} else if ((*le0).kind == ((Tnodekind292020) 45)) {
			/* plain dot expression: field symbol is sons[1] */
			Tsym292834* field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
			result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
		}
	}
	return result0;
}
/* Build the C declaration rope for the discriminant lookup table of
 * discriminator field d0 inside objtype0.  The table has lengthord(typ)+1
 * entries; T839829468_203 is the declaration format string. */
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) {
	TY532811 fmtargs;
	/* cgsym is called for its side effect — presumably it forces emission of
	 * the runtime symbol named by T839829468_130; verify against cgsym. */
	(void) cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
	memset((void*)fmtargs, 0, sizeof(fmtargs));
	fmtargs[0] = discriminatortablename_536057_839829468(m0, objtype0, d0);
	fmtargs[1] = rope_178401_2381377266((NI64)(lengthord_320007_3876443242((*d0).typ) + IL64(1)));
	return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), fmtargs, 2);
}
/* gendiscriminantcheck: emit the runtime check performed before assigning a
 * new value to an object-variant discriminator field.  Ensures the
 * discriminant lookup table for (objtype0, field0) is declared once per
 * module, then emits the check call (fmt T839829468_611) with the old
 * location `a0`, the new value `tmp0`, the table name and its size. */
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816 a0, Tloc292816 tmp0, Ttype292840* objtype0, Tsym292834* field0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI64 L0;
TY535235 LOC8;
t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864));
/* Requesting the type info also registers/emits it for this module. */
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
L0 = lengthord_320007_3876443242((*field0).typ);
{
NIM_BOOL LOC4;
TY178507 LOC7;
LOC4 = (NIM_BOOL)0;
/* containsorincl returns whether the id was already present and adds it,
 * so the table declaration is emitted at most once per module. */
LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0);
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(tmp0);
LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1)));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/* Assign to an object discriminator field: evaluate the RHS into a fresh
 * temporary, run the runtime discriminant check against the current LHS
 * value, then commit the assignment.  e0 is the asgn node: sons[0] = LHS
 * (possibly wrapped in a checked-field node, kind 46), sons[1] = RHS. */
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) {
	Tloc292816 lhs;
	Tloc292816 rhs_tmp;
	Tnode292802* dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
	memset((void*)(&lhs), 0, sizeof(lhs));
	memset((void*)(&rhs_tmp), 0, sizeof(rhs_tmp));
	/* Unwrap a checked-field access to reach the underlying dot expression. */
	if ((*dotexpr0).kind == ((Tnodekind292020) 46)) {
		dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
	}
	initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&lhs));
	gettemp_537032_839829468(p0, lhs.t, (&rhs_tmp), NIM_FALSE);
	expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&rhs_tmp));
	gendiscriminantcheck_549144_839829468(p0, lhs, rhs_tmp, (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
	genassignment_539264_839829468(p0, lhs, rhs_tmp, 0);
}
/* genasgn: generate C code for a Nim assignment `e0` (sons[0] = LHS,
 * sons[1] = RHS).  Three paths:
 *   1. LHS is a symbol with flag 30 set -> goto-variable assignment.
 *   2. LHS is a discriminator field needing a check -> asgnfielddiscriminant.
 *   3. Otherwise: normal assignment via loadinto, with the fast-assignment
 *      flag on the destination loc when fastasgn0 is true. */
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) {
genlinedir_532823_839829468(p0, e0);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
Tloc292816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
Tnode292802* LOC13;
Tnode292802* LOC16;
/* LHS kinds 47/65 (deref-like nodes, presumably) get special deref
 * handling; everything else is initialized as a plain loc expression. */
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
/* Mark the destination so codegen may skip the GC write barrier/copy. */
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
asgnfielddiscriminant_549209_839829468(p0, e0);
}
LA1: ;
}
/* genasmoremitstmt: build the output string for a Nim `asm` or `emit`
 * statement.  The statement's sons alternate string literals (kinds 20..22,
 * copied verbatim) and symbols (kind 3, expanded to their C location, type
 * descriptor, or mangled name depending on symbol kind).  When isasmstmt0 is
 * true and the active C compiler has capability bit 5 set, the result is
 * re-split line by line so each line can be quoted/decorated for that
 * compiler's inline-asm syntax; otherwise a trailing newline is appended. */
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) {
Ropeobj178006* result0;
NimStringDesc* res0;
result0 = (Ropeobj178006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
/* Phase 1: concatenate all sons into the flat string res0. */
{
NI i_548547_839829468;
NI HEX3Atmp_548644_839829468;
NI LOC2;
NI res_548647_839829468;
i_548547_839829468 = (NI)0;
HEX3Atmp_548644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1));
res_548647_839829468 = ((NI) 0);
{
while (1) {
if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4;
i_548547_839829468 = res_548647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) {
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
/* String/comment literal: append its text unchanged. */
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym;
{
Tloc292816 a0;
Ropeobj178006* LOC11;
NimStringDesc* LOC12;
/* Symbol kinds in mask 28672 (value-like symbols, presumably vars/
 * lets/params): substitute the symbol's C lvalue. */
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = rdloc_538188_839829468(a0);
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_178856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
Ropeobj178006* LOC16;
NimStringDesc* LOC17;
/* Type symbol (kind 7): substitute the generated C type name. */
if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14;
LOC16 = (Ropeobj178006*)0;
LOC16 = gettypedesc_535671_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_178856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
Ropeobj178006* r0;
NimStringDesc* LOC23;
/* Any other symbol: substitute its mangled name, creating and caching
 * it on the symbol's loc if not yet assigned. */
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_533205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_178856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_548647_839829468 += ((NI) 1);
} LA4: ;
}
}
/* Phase 2: asm statement + compiler capability bit 5 -> split res0 into
 * lines and wrap each as required (lines already starting with '"' or ':'
 * pass through; others get the T839829468_613/_614 prefix/suffix). */
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
{
NimStringDesc* x_548604_839829468;
NI first_548656_839829468;
NI last_548658_839829468;
x_548604_839829468 = (NimStringDesc*)0;
first_548656_839829468 = ((NI) 0);
last_548658_839829468 = ((NI) 0);
{
while (1) {
NI j0;
/* Advance `last` to the next NUL, CR or LF. */
{
while (1) {
if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35;
last_548658_839829468 += ((NI) 1);
} LA35: ;
}
x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1)));
/* Skip leading spaces/tabs to find the first significant character. */
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* Already quoted ('"') or a constraint line (':') -> emit verbatim. */
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, tnl_176644_4151366050);
}
goto LA38;
LA40: ;
{
/* Non-empty unquoted line -> wrap with the compiler's quote strings. */
if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
/* Consume the line terminator: LF, CR, or CR+LF; NUL ends the loop. */
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47;
last_548658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50;
last_548658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54;
last_548658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_548656_839829468 = last_548658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
/* No line splitting needed: just terminate with a newline. */
res0 = resizeString(res0, tnl_176644_4151366050->Sup.len + 0);
appendString(res0, tnl_176644_4151366050);
result0 = rope_178277_2381377266(res0);
}
LA25: ;
return result0;
}
/* genasmstmt: emit a Nim `asm` statement.  The assembled text goes either to
 * the module-level file section 7 (when there is no enclosing proc, i.e.
 * p0.prc == NIL) or into the current proc's statement section, in both cases
 * using the active C compiler's asm format string (Field17). */
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
genlinedir_532823_839829468(p0, t0);
s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE);
{
TY178507 LOC5;
/* Top-level asm: append to the module's section 7. */
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
/* Inside a proc: emit into the proc's statement section (2). */
TY178507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/* Emit `stmts0` wrapped in an anonymous C block: startblock / statements /
 * endblock.  The block index returned by startblock is not needed here. */
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) {
	TY533289 noargs;
	memset((void*)noargs, 0, sizeof(noargs));
	(void) startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), noargs, 0);
	genstmts_539244_839829468(p0, stmts0);
	endblock_544060_839829468(p0);
}
/* gentrycpp: generate code for a Nim `try` statement using the C++-style
 * exception strategy (paired with gentry below, which uses the setjmp-based
 * strategy).  Layout of the try node t0: sons[0] = body, then zero or more
 * except branches (node kind 87), optionally followed by a finally clause
 * (node kind 107) as the last son.  Emits a try block (fmt T839829468_617),
 * a catch header built from T839829468_618, one branch per except clause
 * (type-tested via T839829468_621/_622 unless it is a catch-all), an
 * implicit re-raise branch when no catch-all exists (T839829468_623), and
 * finally the trailing finally section via gensimpleblock. */
N_NIMCALL(void, gentrycpp_547865_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ropeobj178006* exc0;
TY533289 LOC16;
NI LOC17;
NI length0;
TY178507 LOC18;
Ropeobj178006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY533289 LOC78;
Tnode292802* LOC79;
/* Allocate a destination temp when the try expression yields a value. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, t0);
exc0 = gettempname_533596_839829468((*p0).module);
/* Pull in the preferred runtime support symbol (T839829468_615) when the
 * compilerproc exists, otherwise the fallback symbol (T839829468_616). */
{
Tsym292834* LOC10;
Ropeobj178006* LOC13;
LOC10 = (Tsym292834*)0;
LOC10 = getcompilerproc_338746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj178006*)0;
LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
/* Push this try onto the nesting stack, emit `try { body }` and the catch
 * header parameterized with the fresh exception variable name exc0. */
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_295351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj178006*)0;
LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_544035_839829468(p0, LOC19);
{
TY533289 LOC24;
/* Option bit 15 set -> emit the extra fmt line T839829468_619 (exception
 * tracking of some kind — confirm against the options enum). */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
/* Walk the except branches (kind 87) starting at sons[1]. */
{
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_297440_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind292808) 0);
}
LA33: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
Ropeobj178006** LOC39;
TY533289 LOC40;
/* Branches after the first are chained with an `else` (T839829468_620). */
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
TY533289 LOC45;
NI LOC46;
TY533289 LOC47;
/* blen == 1 -> bare `except:` catch-all branch. */
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_544060_839829468(p0);
}
goto LA41;
LA43: ;
{
/* Typed except branch: OR together one type test per listed exception
 * type (sons[0..blen-2]), then guard the handler (sons[blen-1]) with it. */
Ropeobj178006* orexpr0;
TY178507 LOC57;
TY533289 LOC58;
NI LOC59;
TY533289 LOC60;
orexpr0 = NIM_NIL;
{
NI j_547978_839829468;
NI HEX3Atmp_548101_839829468;
NI res_548104_839829468;
j_547978_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2));
res_548104_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC56;
if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51;
j_547978_839829468 = res_548104_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547978_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_548104_839829468 += ((NI) 1);
} LA51: ;
}
}
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_544060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
/* No catch-all branch present: emit a final `else` block that runs the
 * finally statements (if any) and then re-raises (T839829468_623). */
{
TY533289 LOC70;
NI LOC71;
Tnode292802* finallyblock0;
TY533289 LOC76;
Ropeobj178006* LOC77;
if (!!(catchallpresent0)) goto LA63;
{
TY533289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_295364_850551059(t0);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74;
genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj178006*)0;
LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77);
endblock_544060_839829468(p0);
}
LA63: ;
/* Close the catch construct, restore nesting state, and emit the finally
 * section once more for the normal (non-exceptional) path. */
memset((void*)LOC78, 0, sizeof(LOC78));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode292802*)0;
LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts));
{
NIM_BOOL LOC82;
LOC82 = (NIM_BOOL)0;
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/* Append the string r0, converted to a rope and indented to the proc's
 * current indentation level, to section s0 of proc context p0. */
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) {
	Ropeobj178006** dest = s_529179_3723162438(p0, s0);
	Ropeobj178006* text = rope_178277_2381377266(r0);
	add_178482_2381377266(dest, indentline_532656_839829468(p0, text));
}
/* Pop and return the last element of a Nim seq of rope pointers, shrinking
 * the seq by one via setLengthSeq.  Mirrors Nim's system `pop`. */
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) {
	NI last = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	Ropeobj178006* top = (*s0)->data[last];
	(*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (last)));
	return top;
}
/* gentry: generate code for a Nim `try` statement using the safe-point
 * (setjmp-style) exception strategy — the counterpart of gentrycpp above.
 * Declares a per-try safe point variable (T839829468_625), pushes it
 * (T839829468_626), and selects one of several setjmp flavors depending on
 * which define is active (T839829468_627/_629/_631 checks choosing among
 * fmt strings _628/_630/_632).  The body runs inside an `if (setjmp == 0)`
 * block (T839829468_633); except branches (node kind 87) test the raised
 * exception's type against each listed type (fmt _637/_638 building an OR
 * expression) and reset the safe-point status (_636) before running the
 * handler.  A trailing finally clause (kind 107) is emitted afterwards with
 * its safe point pushed on finallysafepoints, and _640 closes the construct
 * (presumably re-raising when the exception was not handled). */
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NIM_BOOL LOC8;
Ropeobj178006* safepoint0;
TY178507 LOC17;
TY178507 LOC18;
TY178507 LOC37;
NI LOC38;
NI length0;
TY533289 LOC39;
TY533289 LOC40;
NI LOC41;
TY533289 LOC42;
NI i0;
Tnode292802* LOC95;
TY178507 LOC103;
/* Allocate a destination temp when the try expression yields a value. */
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297440_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Make sure the header named by T839829468_624 (presumably <setjmp.h>) is
 * included in the generated module. */
LOC8 = (NIM_BOOL)0;
LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_532823_839829468(p0, t0);
safepoint0 = gettempname_533596_839829468((*p0).module);
/* Pull in the preferred runtime support symbol, falling back as in
 * gentrycpp above. */
{
Tsym292834* LOC11;
Ropeobj178006* LOC14;
LOC11 = (Tsym292834*)0;
LOC11 = getcompilerproc_338746_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj178006*)0;
LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj178006* LOC16;
LOC16 = (Ropeobj178006*)0;
LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Declare the safe point (section 0 = locals) and push it (section 2). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
/* Choose the setjmp variant based on which define is set. */
{
NIM_BOOL LOC21;
TY178507 LOC24;
LOC21 = (NIM_BOOL)0;
LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY178507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY178507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
TY178507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* `if (setjmp(...) == 0) { body; popSafePoint }` then the else-branch
 * block for the exceptional path. */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_295351_850551059(t0);
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_544060_839829468(p0);
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
{
TY533289 LOC47;
/* Option bit 15: same extra fmt line as in gentrycpp. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
/* Walk the except branches (kind 87) starting at sons[1]. */
{
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA51: ;
if (!LOC50) goto LA49;
{
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_297440_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind292808) 0);
}
LA56: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
TY533289 LOC67;
NI LOC68;
TY178507 LOC69;
TY533289 LOC70;
/* blen == 1 -> bare `except:` catch-all branch. */
if (!(blen0 == ((NI) 1))) goto LA60;
{
TY533289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_544060_839829468(p0);
}
goto LA58;
LA60: ;
{
/* Typed except branch: OR together one `isObj`-style test per listed
 * type, choosing the fmt variant by the gcmd/flag-27 condition. */
Ropeobj178006* orexpr0;
TY178507 LOC91;
NI LOC92;
TY178507 LOC93;
TY533289 LOC94;
orexpr0 = NIM_NIL;
{
NI j_548247_839829468;
NI HEX3Atmp_548521_839829468;
NI res_548524_839829468;
j_548247_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2));
res_548524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY178507 LOC86;
if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74;
j_548247_839829468 = res_548524_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
{
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_548524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
if (!(((NI) 1) < i0)) goto LA89;
line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_544060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
(*p0).inexceptblock -= ((NI) 1);
LOC95 = (Tnode292802*)0;
LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts));
endblock_544060_839829468(p0);
/* Trailing finally clause: push its safe point while its statements are
 * generated so nested returns can reference it, then pop it again. */
{
NIM_BOOL LOC98;
Ropeobj178006* LOC102;
LOC98 = (NIM_BOOL)0;
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj178006*)0;
LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * getRaiseFrmt: returns a fresh copy of the static format string
 * T839829468_641 used by genraisestmt below when emitting a `raise`
 * with an explicit exception expression.
 * NOTE(review): the actual text of T839829468_641 is defined elsewhere
 * in this generated file -- presumably a raiseException(...) call
 * template; confirm against the string-constant table. */
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) {
NimStringDesc* result0;
result0 = (NimStringDesc*)0;
result0 = copyString(((NimStringDesc*) &T839829468_641));
return result0;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genRaiseStmt: emits C code for a Nim `raise` statement into proc
 * context p0, for AST node t0. */
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
Tnode292802* finallyblock0;
/* If we are inside an `except` block, first emit the enclosing try's
 * last son (if its kind is 107 -- presumably nkFinally; TODO confirm
 * against the Tnodekind enum) so the finally code runs before the
 * re-raise transfers control. */
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7;
gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
{
Tloc292816 a0;
Ropeobj178006* e0;
Ttype292840* typ0;
NimStringDesc* LOC13;
TY532811 LOC14;
/* Branch 1: `raise <expr>` -- the first son is not an empty node
 * (kind 1). Evaluate the exception expression, resolve its type, and
 * emit the raise call (format from getraisefrmt) with the value and
 * the C-escaped type name as arguments. */
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_538188_839829468(a0);
typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_532823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_546824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
{
/* Branch 2: bare `raise` (re-raise the current exception). */
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY533289 LOC24;
Ropeobj178006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
/* Condition: (compiling via command 2 OR module flag bit 27 set)
 * AND global option bit 31 clear -- presumably "C++ exception
 * handling in use, native goto-based EH disabled"; TODO confirm
 * against the Tcommands/Tglobaloption enums. */
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25);
}
goto LA16;
LA22: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genTypeSection: intentionally empty -- type sections produce no
 * statement-level C code here (types are emitted on demand elsewhere). */
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) {
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * determineSection: picks the output C-file section for an `emit`
 * pragma node. Defaults to section 7; if the node's first son is a
 * string literal (node kinds 20..22) its prefix selects section 3,
 * 9, or 1 instead (the prefix strings T839829468_643/644/645 --
 * presumably "/*TYPESECTION*" / "/*VARSECTION*" / "/*INCLUDESECTION*"
 * style markers -- are defined elsewhere in this generated file). */
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) {
Tcfilesection529005 result0;
result0 = (Tcfilesection529005)0;
result0 = ((Tcfilesection529005) 7);
{
NIM_BOOL LOC3;
NI LOC4;
NimStringDesc* sec0;
/* Guard: node must have at least one son and son 0 must be a
 * string-literal kind (20..22). */
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = len_293081_850551059(n0);
LOC3 = (((NI) 1) <= LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind >= ((Tnodekind292020) 20) && (*(*n0).kindU.S6.sons->data[((NI) 0)]).kind <= ((Tnodekind292020) 22));
LA5: ;
if (!LOC3) goto LA6;
sec0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S3.strval;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643));
if (!LOC10) goto LA11;
result0 = ((Tcfilesection529005) 3);
}
goto LA8;
LA11: ;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644));
if (!LOC14) goto LA15;
result0 = ((Tcfilesection529005) 9);
}
goto LA8;
LA15: ;
{
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645));
if (!LOC18) goto LA19;
result0 = ((Tcfilesection529005) 1);
}
goto LA8;
LA19: ;
LA8: ;
}
LA6: ;
return result0;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genEmit: handles the `{.emit.}` pragma. Renders the emit payload
 * (son 1 of t0) to a rope s0, then either appends it to a module-level
 * file section (when not inside a proc: p0->prc == nil) or emits it
 * inline in the current proc's statement section (section 2). */
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
s0 = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
{
Tcfilesection529005 section0;
Tnode292802* LOC5;
/* Top-level emit: choose the target C-file section from the payload
 * (see determinesection above) and append with a #line directive. */
if (!((*p0).prc == NIM_NIL)) goto LA3;
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059(t0, ((NI) 1));
section0 = determinesection_548819_839829468(LOC5);
genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], s0);
}
goto LA1;
LA3: ;
{
/* Emit inside a proc body: goes into the proc's statement section. */
genlinedir_532823_839829468(p0, t0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), s0);
}
LA1: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genBreakPoint: handles the `{.breakpoint.}` pragma. Only active when
 * proc-option bit 17 is set (presumably the debugger/endb option --
 * TODO confirm against the Toption enum). Registers the breakpoint
 * (line number, filename, name) into the global gbreakpoints rope via
 * the format string T839829468_647. */
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY535238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3;
{
/* Named form: node kind 34 carries an explicit name in son 1. */
if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
{
/* Anonymous form: synthesize a name from a global counter
 * (prefix string T839829468_646 + sequence number). */
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_548860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_548860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_192415_155036129((*t0).info);
LOC12[0] = rope_178401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_192260_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_191638_155036129(LOC14);
LOC12[2] = makecstring_191638_155036129(name0);
appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genWatchpoint: handles the `{.watchpoint.}` pragma. Returns early
 * unless proc-option bit 17 is set (same debugger option as
 * genbreakpoint above). Emits a call (format T839829468_648) with the
 * watched location's address, its rendered source text, and its
 * runtime type info. */
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
Ttype292840* typ0;
TY535238 LOC5;
NimStringDesc* LOC6;
{ {
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = addrloc_538204_839829468(a0);
LOC6 = (NimStringDesc*)0;
LOC6 = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
LOC5[1] = makecstring_191638_155036129(LOC6);
LOC5[2] = gentypeinfo_535941_839829468((*p0).module, typ0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), LOC5, 3);
}BeforeRet: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genPragma: walks the sons of a pragma node and dispatches on the
 * special word of each item: 191 -> emit, 131 -> breakpoint,
 * 176 -> watchpoint, 183 -> an "inject statement" pragma that compiles
 * its body in a throwaway proc context and stores the resulting code
 * in module->injectstmt (the Tspecialword numeric values map to the
 * wXxx enum in the Nim compiler -- TODO confirm the exact names).
 * All other pragma words are ignored here. */
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) {
{
NI i_549054_839829468;
NI HEX3Atmp_549073_839829468;
NI LOC2;
NI res_549076_839829468;
i_549054_839829468 = (NI)0;
HEX3Atmp_549073_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1));
res_549076_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
Tspecialword275003 LOC5;
if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4;
i_549054_839829468 = res_549076_839829468;
it0 = (*n0).kindU.S6.sons->data[i_549054_839829468];
LOC5 = (Tspecialword275003)0;
LOC5 = whichpragma_318911_2616423590(it0);
switch (LOC5) {
case ((Tspecialword275003) 191):
{
genemit_548839_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 131):
{
genbreakpoint_548862_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 176):
{
genwatchpoint_549016_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 183):
{
/* Compile the pragma's body (son 1) in a fresh proc context with
 * option bits 98304 (two flags) cleared, then capture the generated
 * statement-section code as the module's inject statement. */
Tcproc529021* p0;
Ropeobj178006** LOC10;
p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10));
}
break;
default:
{
}
break;
}
res_549076_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genParForStmt: emits a parallel `for` loop (Nim `||` iterator).
 * Generates a loop header via format T839829468_649 with the loop
 * variable, range bounds a/b, and the string from call-son 3
 * (presumably an OpenMP pragma annotation -- TODO confirm), then
 * compiles the loop body inside a breakable block marked isloop. */
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI oldbreakidx_546411_839829468;
Tsym292834* forloopvar0;
Tloc292816 rangea0;
Tloc292816 rangeb0;
Tnode292802* call0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY533289 LOC3;
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
/* Save break target so nested `break` bookkeeping can be restored. */
oldbreakidx_546411_839829468 = (*p0).breakidx;
forloopvar0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)(&rangea0), 0, sizeof(rangea0));
memset((void*)(&rangeb0), 0, sizeof(rangeb0));
assignlocalvar_538614_839829468(p0, forloopvar0);
call0 = (*t0).kindU.S6.sons->data[((NI) 1)];
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 1)], (&rangea0));
initlocexpr_539283_839829468(p0, (*call0).kindU.S6.sons->data[((NI) 2)], (&rangeb0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((*forloopvar0).loc);
LOC1[1] = rdloc_538188_839829468(rangea0);
LOC1[2] = rdloc_538188_839829468(rangeb0);
LOC2 = (NimStringDesc*)0;
LOC2 = getstr_297230_850551059((*call0).kindU.S6.sons->data[((NI) 3)]);
LOC1[3] = rope_178277_2381377266(LOC2);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC3, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546411_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genState: emits one state label of a closure-iterator state machine.
 * Requires exactly one son of int-literal kind (node kind 6);
 * otherwise raises an internal compiler error. The intval becomes the
 * state number interpolated into format T839829468_652 (presumably a
 * `STATE<n>:` case label -- TODO confirm against the string table). */
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI64 idx0;
TY178507 LOC9;
{
NIM_BOOL LOC3;
NI LOC4;
NimStringDesc* LOC8;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = len_293081_850551059(n0);
LOC3 = (LOC4 == ((NI) 1));
if (!(LOC3)) goto LA5;
LOC3 = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6));
LA5: ;
if (!!(LOC3)) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC8 = HEX24_196185_1689653243(T839829468_650);
internalerror_196113_155036129(LOC8);
}
LA6: ;
idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rope_178401_2381377266(idx0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), LOC9, 1);
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genGotoState: emits a computed jump to a state of the closure-
 * iterator state machine: stores the evaluated state expression
 * (format T839829468_603), marks that a BeforeRet label is needed,
 * then emits a dispatch construct (T839829468_653 ... T839829468_160,
 * presumably a switch with one `goto STATE<i>` per possible ordinal
 * value 0..lastord -- TODO confirm against the string table). */
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
TY178507 LOC1;
TY533289 LOC2;
TY533289 LOC7;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC1, 1);
(*p0).beforeretneeded = NIM_TRUE;
memset((void*)LOC2, 0, sizeof(LOC2));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), LOC2, 0);
{
NI64 i_544214_839829468;
NI64 HEX3Atmp_544223_839829468;
NI64 res_544226_839829468;
i_544214_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = (NI64)0;
HEX3Atmp_544223_839829468 = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
res_544226_839829468 = IL64(0);
{
/* One dispatch entry per ordinal value of the state type. */
while (1) {
TY178507 LOC6;
if (!(res_544226_839829468 <= HEX3Atmp_544223_839829468)) goto LA5;
i_544214_839829468 = res_544226_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266(i_544214_839829468);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), LOC6, 1);
res_544226_839829468 += ((NI) 1);
} LA5: ;
}
}
memset((void*)LOC7, 0, sizeof(LOC7));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC7, 0);
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genBreakState: emits the "iterator finished?" test of a closure-
 * iterator state machine. If son 0 is a closure node (kind 155 --
 * presumably nkClosure; TODO confirm) the environment is taken from
 * its son 1 (format T839829468_655), otherwise son 0 is used directly
 * (format T839829468_656). */
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
TY178507 LOC5;
if (!((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 155))) goto LA3;
initlocexpr_539283_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(a0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), LOC7, 1);
}
LA1: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * expr: the central code-generation dispatcher. Switches on the AST
 * node kind of n0 and forwards to the specialized generator, placing
 * the result (if any) into destination loc d0. Numeric Tnodekind /
 * Tsymkind case values map to the nkXxx / skXxx enums of the Nim
 * compiler sources -- the per-case notes below are inferred from the
 * called generator names and should be confirmed against those enums.
 * Unknown node kinds raise an internal compiler error. */
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
switch ((*n0).kind) {
/* Symbol node: dispatch further on the symbol's kind. */
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind292435) 13):
{
/* Method-like symbol: either only a prototype is needed (flag mask
 * 33554448 set) or the full proc body is generated. */
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_539201_839829468(sym0);
genprocprototype_539254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_532951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
/* Proc/iterator/converter-like symbols: flag bit 23 set means the
 * symbol may not be referenced here (localerror with message
 * T839829468_270); otherwise generate the proc and verify its loc
 * was filled in (internal error T839829468_271 otherwise). */
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_196085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 10):
{
/* Constant symbol: simple consts become literals; complex consts
 * go through the complex-const generator. */
{
NIM_BOOL LOC24;
Ropeobj178006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_532311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj178006*)0;
LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_558249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind292435) 19):
{
/* Enum-field-like symbol: its position is emitted as an integer. */
Ropeobj178006* LOC30;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position)));
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0));
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 20):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
/* Variable-like symbols: ensure a prototype exists (flag mask
 * 4194312 clear), verify the loc is filled (internal error
 * T839829468_285 otherwise), and handle thread-local variables
 * (flag bit 22) via accessthreadlocalvar / emulated threadvars. */
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_539236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_196100_155036129((*n0).info, LOC42);
}
LA40: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_532945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj178006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_532949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj178006*)0;
LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
LA44: ;
}
break;
case ((Tsymkind292435) 5):
{
/* Parameter-like symbol: loc must be filled (internal error
 * T839829468_289 otherwise). */
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_196100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tsymkind292435) 3):
{
/* Let/temp-like symbol: same loc-filled invariant (internal error
 * T839829468_290 otherwise). */
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_196100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
default:
{
/* Unexpected symbol kind -> internal compiler error. */
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_196100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
/* Nil-like literal (kind 23): emitted only for non-empty types. */
case ((Tnodekind292020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj178006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_297440_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj178006*)0;
LOC82 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0));
}
LA80: ;
}
break;
/* String literals (kinds 20..22): go into the data section. */
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
Ropeobj178006* LOC84;
LOC84 = (Ropeobj178006*)0;
LOC84 = genliteral_539273_839829468(p0, n0);
putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
/* Numeric/char literals: emitted inline. */
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19):
case ((Tnodekind292020) 5):
{
Ropeobj178006* LOC86;
LOC86 = (Ropeobj178006*)0;
LOC86 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0));
}
break;
/* Call-like nodes: magic ops go to genmagicexpr, everything else to
 * gencall. When the call has no type (void) a throwaway loc is used. */
case ((Tnodekind292020) 27):
case ((Tnodekind292020) 32):
case ((Tnodekind292020) 29):
case ((Tnodekind292020) 30):
case ((Tnodekind292020) 31):
case ((Tnodekind292020) 26):
case ((Tnodekind292020) 28):
{
Tnode292802* op0;
genlinedir_532823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc292816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_543632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_543632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
/* Set constructor: constant non-empty sets become a data literal,
 * otherwise a runtime constructor is generated. */
case ((Tnodekind292020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj178006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_293081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj178006*)0;
LOC115 = gensetnode_549664_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_557496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
/* Array/seq constructor: constant -> complex const; seq type
 * (Ttypekind 24) -> seq constructor; otherwise array constructor. */
case ((Tnodekind292020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_293081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype292840* LOC126;
LOC126 = (Ttype292840*)0;
LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127;
genseqconstr_555004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_558207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
/* Tuple constructor: constant -> complex const, else runtime. */
case ((Tnodekind292020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_293081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_557618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind292020) 38):
{
genobjconstr_554903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 61):
{
gencast_556537_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
case ((Tnodekind292020) 60):
{
genconv_556632_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 64):
case ((Tnodekind292020) 63):
{
genaddr_553051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 42):
{
genbracketexpr_554277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
genderef_543921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind292020) 45):
{
genrecordfield_553448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 46):
{
gencheckedrecordfield_554046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 127):
case ((Tnodekind292020) 112):
{
genblock_546083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 126):
{
genstmtlistexpr_558402_839829468(p0, n0, d0);
}
break;
/* Statement list: generate each son as a statement. */
case ((Tnodekind292020) 115):
{
{
NI i_559023_839829468;
NI HEX3Atmp_559276_839829468;
NI LOC151;
NI res_559279_839829468;
i_559023_839829468 = (NI)0;
HEX3Atmp_559276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_295351_850551059(n0);
HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1));
res_559279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153;
i_559023_839829468 = res_559279_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]);
res_559279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind292020) 48):
case ((Tnodekind292020) 92):
{
genif_544982_839829468(p0, n0, d0);
}
break;
/* When-statement-like node: recurse into son[1][0]. */
case ((Tnodekind292020) 93):
{
expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind292020) 66):
{
downconv_558581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 67):
{
upconv_558431_839829468(p0, n0, d0);
}
break;
/* Range/index/object checks: same generator, different check name. */
case ((Tnodekind292020) 68):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind292020) 69):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind292020) 70):
{
genrangechck_556590_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind292020) 71):
{
convstrtocstr_556642_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 72):
{
convcstrtostr_556654_839829468(p0, n0, d0);
}
break;
/* Lambda/proc-def-as-expression: generate the proc and verify loc. */
case ((Tnodekind292020) 51):
case ((Tnodekind292020) 52):
{
Tsym292834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_539258_839829468(p0, d0, (*sym0).loc);
}
break;
case ((Tnodekind292020) 155):
{
genclosure_557836_839829468(p0, n0, d0);
}
break;
/* Empty node: nothing to generate. */
case ((Tnodekind292020) 1):
{
}
break;
case ((Tnodekind292020) 96):
{
genwhilestmt_545984_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 99):
case ((Tnodekind292020) 100):
{
genvarstmt_544854_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 101):
{
genconststmt_544909_839829468(p0, n0);
}
break;
/* This node kind must have been transformed away before cgen. */
case ((Tnodekind292020) 94):
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind292020) 97):
{
gencase_547826_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 109):
{
genreturnstmt_545617_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 110):
{
genbreakstmt_546444_839829468(p0, n0);
}
break;
/* Assignments: skipped when node flag bit 14 is set. */
case ((Tnodekind292020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183;
genasgn_549239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind292020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188;
genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
/* Discard statement: evaluate the expression for side effects only. */
case ((Tnodekind292020) 114):
{
{
Tloc292816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193;
genlinedir_532823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind292020) 89):
{
genasmstmt_548659_839829468(p0, n0);
}
break;
/* try statement: C++-style EH vs goto-based EH -- same condition as
 * in genraisestmt above (command 2 / module flag 27 / option 31). */
case ((Tnodekind292020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_547865_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_548114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind292020) 108):
{
genraisestmt_546828_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 98):
{
gentypesection_538184_839829468((*p0).module, n0);
}
break;
/* Declarative/comment-like nodes: no code generated. */
case ((Tnodekind292020) 125):
case ((Tnodekind292020) 84):
case ((Tnodekind292020) 121):
case ((Tnodekind292020) 116):
case ((Tnodekind292020) 117):
case ((Tnodekind292020) 118):
case ((Tnodekind292020) 119):
case ((Tnodekind292020) 120):
case ((Tnodekind292020) 83):
case ((Tnodekind292020) 82):
{
}
break;
case ((Tnodekind292020) 90):
{
genpragma_549039_839829468(p0, n0);
}
break;
/* Pragma-block-like node: generate only its last son. */
case ((Tnodekind292020) 91):
{
Tnode292802* LOC211;
LOC211 = (Tnode292802*)0;
LOC211 = lastson_295364_850551059(n0);
expr_539248_839829468(p0, LOC211, d0);
}
break;
/* Proc/func/method definitions: forward declarations (empty son 2)
 * of module-level, non-generic procs may still need code generation
 * (methods, exported/compilerproc symbols, dead-code-elim rules). */
case ((Tnodekind292020) 79):
case ((Tnodekind292020) 80):
case ((Tnodekind292020) 81):
{
{
Tsym292834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym292834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym292834*)0;
LOC220 = skipgenericowner_297279_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym292834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym292834*)0;
LOC231 = getmodule_299123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind292435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode292802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode292802*)0;
LOC242 = getbody_335227_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_532951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind292020) 95):
{
genparforstmt_546208_839829468(p0, n0);
}
break;
/* Closure-iterator state-machine nodes (see generators above). */
case ((Tnodekind292020) 157):
{
genstate_544117_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 156):
{
gengotostate_544144_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 158):
{
genbreakstate_544229_839829468(p0, n0);
}
break;
default:
{
/* Unknown node kind -> internal compiler error with the enum name. */
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_196100_155036129((*n0).info, LOC251);
}
break;
}
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * genStmts: generates t0 as a statement by calling expr with a
 * throwaway destination loc. Sanity check: a statement must not leave
 * a value behind -- a0.k must be one of the three lowest Tloc kinds
 * (bitmask 7), otherwise an internal compiler error (T839829468_658)
 * is raised. */
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
expr_539248_839829468(p0, t0, (&a0));
{
NimStringDesc* LOC5;
if (!!(((7 &(1U<<((NU)(a0.k)&15U)))!=0))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_658);
internalerror_196113_155036129(LOC5);
}
LA3: ;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * myProcess: compiler-pass callback. Returns the node unchanged.
 * Skips code generation when the pass context is nil or skipcodegen
 * says so; otherwise compiles the top-level node into the module's
 * init proc (with its options refreshed first). */
N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
/* The pass context is known to be the C code-generator's module. */
m0 = ((Tcgen529027*) (b0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}BeforeRet: ;
return result0;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * getSomeInitName: builds "<owner>_<module><suffix>" (or just
 * "<module><suffix>") for a module's init/datInit proc name. The
 * mangled owner-name prefix is added only when flag mask 12288 (two
 * Tsymflag bits) is clear; T839829468_12 is the separator string. */
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NimStringDesc* LOC5;
if (!((12288 & (*m0).flags) == 0)) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = mangle_528847_2036603609((*(*(*m0).owner).name).s);
result0 = rope_178277_2381377266(LOC5);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_12));
}
LA3: ;
add_178487_2381377266(&result0, (*(*m0).name).s);
add_178487_2381377266(&result0, suffix0);
return result0;
}
/* Generated by the Nim compiler's C backend -- do not edit by hand.
 * getInitName: module init-proc name = getsomeinitname with suffix
 * T839829468_659 (defined elsewhere in this generated file). */
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659));
return result0;
}
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660));
return result0;
}
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) {
Ropeobj178006* init0;
Ropeobj178006* datinit0;
TY178507 LOC1;
TY178507 LOC2;
init0 = getinitname_562235_839829468(m0);
datinit0 = getdatinitname_562239_839829468(m0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY178507 LOC7;
Ropeobj178006* initcall0;
TY178507 LOC8;
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11;
add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673));
result0 = NIM_NIL;
{
NI i_561717_839829468;
NI HEX3Atmp_561722_839829468;
NI res_561725_839829468;
i_561717_839829468 = (NI)0;
HEX3Atmp_561722_839829468 = (NI)0;
HEX3Atmp_561722_839829468 = ((fileinfos_191629_155036129 ? fileinfos_191629_155036129->Sup.len : 0) - 1);
res_561725_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC5;
if (!(res_561725_839829468 <= HEX3Atmp_561722_839829468)) goto LA4;
i_561717_839829468 = res_561725_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[i_561717_839829468].projpath);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1);
res_561725_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) {
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj178006* initstackbottomcall0;
TY536475 LOC38;
TY535238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
LOC12 = (NIM_BOOL)0;
LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
{
if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
{
Ropeobj178006* LOC24;
if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
{
Ropeobj178006* LOC29;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj178006*)0;
LOC29 = genfilenames_561688_839829468(m0);
add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29);
}
LA27: ;
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0));
LA33: ;
if (!LOC32) goto LA34;
initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY533289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
(*m0).labels += ((NI) 1);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_529151_3723162438;
LOC38[1] = gbreakpoints_548861_839829468;
LOC38[2] = othermodsinit_529150_3723162438;
{
NIM_BOOL LOC41;
TY533289 LOC45;
LOC41 = (NIM_BOOL)0;
LOC41 = emulatedthreadvars_532949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_529149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_178401_2381377266(((NI64) ((*m0).labels)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3);
{
TY533289 LOC52;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
{
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_562243_839829468((*m0).module);
{
Tnode292802* disp0;
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_432151_3853300031();
{
NI i_563891_839829468;
NI HEX3Atmp_563895_839829468;
NI LOC16;
NI res_563898_839829468;
i_563891_839829468 = (NI)0;
HEX3Atmp_563895_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(disp0);
HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1));
res_563898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18;
i_563891_839829468 = res_563898_839829468;
genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym);
res_563898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_561729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) {
NI i0;
i0 = ((NI) 0);
{
while (1) {
Tsym292834* prc0;
if (!(i0 <= ((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1))) goto LA2;
prc0 = (*m0).forwardedprocs->data[i0];
{
NimStringDesc* LOC7;
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA5;
LOC7 = (NimStringDesc*)0;
LOC7 = rawNewString((*(*prc0).name).s->Sup.len + 17);
appendString(LOC7, ((NimStringDesc*) &T839829468_678));
appendString(LOC7, (*(*prc0).name).s);
internalerror_196100_155036129((*prc0).info, LOC7);
}
LA5: ;
genprocnoforward_560906_839829468(m0, prc0);
i0 += ((NI) 1);
} LA2: ;
}
gforwardedprocscounter_529171_3723162438 -= i0;
(*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0));
}
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) {
Ropeobj178006* initname0;
Ropeobj178006* prc0;
TY178507 LOC1;
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
Ropeobj178006** LOC14;
Ropeobj178006** LOC15;
Ropeobj178006** LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC33;
Ropeobj178006** LOC34;
Ropeobj178006** LOC35;
Ropeobj178006** LOC36;
Ropeobj178006* LOC37;
Ropeobj178006* LOC38;
Ropeobj178006** LOC39;
Ropeobj178006** LOC40;
Ropeobj178006** LOC41;
Ropeobj178006* LOC42;
Ropeobj178006* LOC50;
TY533289 LOC51;
TY178507 LOC52;
TY533289 LOC58;
initname0 = getinitname_562235_839829468((*m0).module);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
{
TY532811 LOC6;
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
{
TY532811 LOC11;
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = initgcframe_538435_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC12);
LOC13 = (Ropeobj178006*)0;
LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj178006**)0;
LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj178006**)0;
LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj178006**)0;
LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC17);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8));
{
Ropeobj178006* procname0;
Ropeobj178006* LOC28;
Ropeobj178006* LOC29;
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj178006*)0;
LOC28 = quotedfilename_196818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj178006*)0;
LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28);
add_178482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY533289 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_178482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
LOC33 = (Ropeobj178006*)0;
LOC33 = gensectionstart_530081_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj178006**)0;
LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj178006**)0;
LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj178006**)0;
LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj178006*)0;
LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC37);
LOC38 = (Ropeobj178006*)0;
LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj178006**)0;
LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj178006**)0;
LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC42);
{
NIM_BOOL LOC45;
Ropeobj178006* LOC49;
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj178006*)0;
LOC49 = deinitframe_560150_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC49);
}
LA47: ;
LOC50 = (Ropeobj178006*)0;
LOC50 = deinitgcframe_538441_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC50);
memset((void*)LOC51, 0, sizeof(LOC51));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_562239_839829468((*m0).module);
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
{
Tcfilesection529005 i_562401_839829468;
NI res_562482_839829468;
i_562401_839829468 = (Tcfilesection529005)0;
res_562482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj178006* LOC56;
Ropeobj178006* LOC57;
if (!(res_562482_839829468 <= ((NI) 16))) goto LA55;
i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468));
LOC56 = (Ropeobj178006*)0;
LOC56 = gensectionstart_530015_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC56);
add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]);
LOC57 = (Ropeobj178006*)0;
LOC57 = gensectionend_530050_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC57);
res_562482_839829468 += ((NI) 1);
} LA55: ;
}
}
memset((void*)LOC58, 0, sizeof(LOC58));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0);
{
NIM_CHAR i_562442_839829468;
Ropeobj178006* el_562443_839829468;
TY529136 HEX3Atmp_562487_839829468;
NIM_CHAR i_562490_839829468;
i_562442_839829468 = (NIM_CHAR)0;
el_562443_839829468 = (Ropeobj178006*)0;
memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468));
memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468));
i_562490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_562442_839829468 = i_562490_839829468;
el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48];
{
Ropeobj178006* ex0;
TY532811 LOC70;
if (!!((el_562443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_562443_839829468;
ex0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0);
}
LA68: ;
{
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_562490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) {
NI i0;
i0 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC3;
if (!(i0 < ((*m0).typestack ? (*m0).typestack->Sup.len : 0))) goto LA2;
LOC3 = (Ropeobj178006*)0;
LOC3 = gettypedesc_535671_839829468(m0, (*m0).typestack->data[i0]);
i0 += ((NI) 1);
} LA2: ;
}
}
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY536475 LOC7;
NimStringDesc* LOC8;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
LOC7[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0);
LOC7[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0);
LOC7[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0);
LOC8 = (NimStringDesc*)0;
LOC8 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE);
LOC7[4] = rope_178277_2381377266(LOC8);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5);
}
LA1: ;
return result0;
}
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) {
NimStringDesc* LOC1;
TY178507 LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_176644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1)));
addf_179205_2381377266(result0, LOC1, LOC2, 1);
}
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getcopyright_561665_839829468(cfile0);
addinttypes_561659_839829468(&result0);
return result0;
}
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY178507 LOC13;
LOC3 = (NIM_BOOL)0;
LOC3 = !((nimtv_538656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype292840* t_538761_839829468;
NI i_538768_839829468;
NI L_538770_839829468;
t_538761_839829468 = (Ttype292840*)0;
i_538768_839829468 = ((NI) 0);
L_538770_839829468 = (nimtvdeps_538674_839829468 ? nimtvdeps_538674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj178006* LOC12;
if (!(i_538768_839829468 < L_538770_839829468)) goto LA11;
t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468];
LOC12 = (Ropeobj178006*)0;
LOC12 = gettypedesc_535671_839829468(m0, t_538761_839829468);
i_538768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_538656_839829468;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) {
NimStringDesc* LOC1;
Tstrentry147009* it0;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_176644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_176644_4151366050);
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1);
it0 = ((Tstrentry147009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj178006* LOC10;
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_176644_4151366050);
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(LOC8);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY178507 LOC14;
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry147009*) ((*it0).Sup.next));
} LA3: ;
}
}
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
result0 = getfileheader_561683_839829468(cfile0);
LOC1 = (Ropeobj178006*)0;
LOC1 = genmergeinfo_530203_2760143328(m0);
add_178482_2381377266(&result0, LOC1);
generatethreadlocalstorage_538717_839829468(m0);
generateheaders_560104_839829468(m0);
{
Tcfilesection529005 i_562614_839829468;
NI res_562622_839829468;
i_562614_839829468 = (Tcfilesection529005)0;
res_562622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC5;
Ropeobj178006* LOC6;
if (!(res_562622_839829468 <= ((NI) 10))) goto LA4;
i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468));
LOC5 = (Ropeobj178006*)0;
LOC5 = gensectionstart_530015_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC5);
add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]);
LOC6 = (Ropeobj178006*)0;
LOC6 = gensectionend_530050_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC6);
res_562622_839829468 += ((NI) 1);
} LA4: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
return result0;
}
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_530832_2760143328(m0);
if (!(LOC3)) goto LA4;
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
LA5: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) {
{
NimStringDesc* externc0;
TY178507 LOC12;
if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(externc0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_273859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563204_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438);
generatethreadvarssize_538771_839829468(m0);
}
LA9: ;
code0 = genmodule_562491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
LOC13 = shouldrecompile_563621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_273863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj178006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_530832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_273859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
if (!!(LOC25)) goto LA26;
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
N_NIMCALL(void, writeheader_563152_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* guard0;
TY178507 LOC1;
TY128506 LOC2;
TY178507 LOC3;
TY533289 LOC13;
TY178507 LOC14;
result0 = getcopyright_561665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_178277_2381377266(LOC2.Field1);
guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_561659_839829468(&result0);
generateheaders_560104_839829468(m0);
generatethreadlocalstorage_538717_839829468(m0);
{
Tcfilesection529005 i_563174_839829468;
NI res_563200_839829468;
i_563174_839829468 = (Tcfilesection529005)0;
res_563200_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
if (!(res_563200_839829468 <= ((NI) 10))) goto LA6;
i_563174_839829468 = ((Tcfilesection529005) (res_563200_839829468));
LOC7 = (Ropeobj178006*)0;
LOC7 = gensectionstart_530015_2760143328(i_563174_839829468);
add_178482_2381377266(&result0, LOC7);
add_178482_2381377266(&result0, (*m0).s[(i_563174_839829468)- 0]);
LOC8 = (Ropeobj178006*)0;
LOC8 = gensectionend_530050_2760143328(i_563174_839829468);
add_178482_2381377266(&result0, LOC8);
res_563200_839829468 += ((NI) 1);
} LA6: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) {
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA3;
finishmodule_563420_839829468(generatedheader_532201_839829468);
}
LA3: ;
{
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6;
{
Tcgen529027* m_563916_839829468;
m_563916_839829468 = (Tcgen529027*)0;
{
NI i_563935_839829468;
NI HEX3Atmp_563937_839829468;
NI res_563939_839829468;
i_563935_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10;
i_563935_839829468 = res_563939_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13;
m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468];
{
if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17;
finishmodule_563420_839829468(m_563916_839829468);
}
LA17: ;
}
LA13: ;
res_563939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen529027* m_563917_839829468;
m_563917_839829468 = (Tcgen529027*)0;
{
NI i_563946_839829468;
NI HEX3Atmp_563948_839829468;
NI res_563950_839829468;
i_563946_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22;
i_563946_839829468 = res_563950_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25;
m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468];
{
if (!(*m_563917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_563813_839829468(m_563917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_563637_839829468(m_563917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_563950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_274789_2528170400(gmapping_529152_3723162438);
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34;
writeheader_563152_839829468(generatedheader_532201_839829468);
}
LA34: ;
}
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) {
{
Tcfilesection529005 i_562848_839829468;
NI res_562853_839829468;
i_562848_839829468 = (Tcfilesection529005)0;
res_562853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_562853_839829468 <= ((NI) 17))) goto LA3;
i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468));
unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL);
res_562853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) {
{
NIM_CHAR i_563014_839829468;
NI res_563019_839829468;
i_563014_839829468 = (NIM_CHAR)0;
res_563019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_563019_839829468 <= ((NI) 57))) goto LA3;
i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468));
unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL);
res_563019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) {
initlinkedlist_147031_3771138726((&(*m0).headerfiles));
initintset_268885_2627731572((&(*m0).declaredprotos));
initidtable_296019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0));
initnodetable_296085_850551059((&(*m0).datacache));
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533596_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533596_839829468(m0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_562833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_562858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) {
{
Tcgen529027* m_563026_839829468;
m_563026_839829468 = (Tcgen529027*)0;
{
NI i_563031_839829468;
NI HEX3Atmp_563033_839829468;
NI res_563035_839829468;
i_563031_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4;
i_563031_839829468 = res_563035_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7;
m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468];
resetmodule_562763_839829468(m_563026_839829468);
}
LA7: ;
res_563035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468);
nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
chckNil((void*)(&nimtvdeclared_538675_839829468));
genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030));
initintset_268885_2627731572((&nimtvdeclared_538675_839829468));
breakpointid_548860_839829468 = ((NI) 0);
}
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
Main.c | #include "XSbench_header.h"
#ifdef MPI
#include<mpi.h>
#endif
// Example main arguments
// #define MARGS "-s small"
#include "lime.h"
// XSBench driver.  Builds the nuclide energy grids and material data, then
// runs the parallel macroscopic cross-section lookup loop, timing it and
// (under VERIFICATION) accumulating a reproducible hash of the results.
// Capitalized helpers (tget, tesec, CLOCKS_EMULATE, CACHE_BARRIER, TRACE_*,
// STATS_*, MAIN) are presumably macros from XSbench_header.h -- confirm there.
int MAIN(int argc, char *argv[])
{
// =====================================================================
// Initialization & Command Line Read-In
// =====================================================================
int version = 16;
int mype = 0;
int i, thread, mat;
unsigned long seed;
tick_t tstart, tend;
double p_energy;
unsigned long long vhash = 0;
int nprocs = 1;
#ifdef MPI
// NOTE(review): 'stat' is declared but never used; harmless.
MPI_Status stat;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &mype);
#endif
// rand() is only used in the serial initialization stages.
// A custom RNG is used in parallel portions.
#ifdef VERIFICATION
srand(26);
#else
srand(1 /*time(NULL)*/);
#endif
// Process CLI Fields -- store in "Inputs" structure
Inputs in = read_CLI( argc, argv );
// to control the number of threads use: export OMP_NUM_THREADS=N
// Set number of OpenMP Threads
// omp_set_num_threads(in.nthreads);
// Print-out of Input Summary
if( mype == 0 )
print_inputs( in, nprocs, version );
// =====================================================================
// Prepare Nuclide Energy Grids, Unionized Energy Grid, & Material Data
// =====================================================================
// Allocate & fill energy grids
#ifndef BINARY_READ
if( mype == 0) printf("Generating Nuclide Energy Grids...\n");
#endif
NuclideGridPoint ** nuclide_grids = gpmatrix(in.n_isotopes,in.n_gridpoints);
#ifdef VERIFICATION
generate_grids_v( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#else
generate_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
// Sort grids by energy
#ifndef BINARY_READ
if( mype == 0) printf("Sorting Nuclide Energy Grids...\n");
sort_nuclide_grids( nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
// If using a unionized grid search, initialize the energy grid
// Otherwise, leave these as null
GridPoint * energy_grid = NULL;
if( in.grid_type == UNIONIZED )
{
// Prepare Unionized Energy Grid Framework
#ifndef BINARY_READ
energy_grid = generate_energy_grid( in.n_isotopes,
in.n_gridpoints, nuclide_grids );
#else
// BINARY_READ path allocates the grid shell; contents come from disk below.
int * index_data;
energy_grid = (GridPoint *)malloc( in.n_isotopes *
in.n_gridpoints * sizeof( GridPoint ) );
index_data = (int *) malloc( in.n_isotopes * in.n_gridpoints
* in.n_isotopes * sizeof(int));
for( i = 0; i < in.n_isotopes*in.n_gridpoints; i++ )
energy_grid[i].xs_ptrs = &index_data[i*in.n_isotopes];
#endif
// Double Indexing. Filling in energy_grid with pointers to the
// nuclide_energy_grids.
#ifndef BINARY_READ
initialization_do_not_profile_set_grid_ptrs( energy_grid, nuclide_grids, in.n_isotopes, in.n_gridpoints );
#endif
}
else if( in.grid_type == HASH )
{
energy_grid = generate_hash_table( nuclide_grids, in.n_isotopes, in.n_gridpoints, in.hash_bins );
}
#ifdef BINARY_READ
if( mype == 0 ) printf("Reading data from \"XS_data.dat\" file...\n");
binary_read(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid, in.grid_type);
#endif
// Get material data
if( mype == 0 )
printf("Loading Mats...\n");
int *num_nucs = load_num_nucs(in.n_isotopes);
int **mats = load_mats(num_nucs, in.n_isotopes);
#ifdef VERIFICATION
double **concs = load_concs_v(num_nucs);
#else
double **concs = load_concs(num_nucs);
#endif
#ifdef BINARY_DUMP
// Dump mode: write the grids and exit early (allocations are reclaimed
// by process exit; acceptable for a one-shot benchmark tool).
if( mype == 0 ) printf("Dumping data to binary file...\n");
binary_dump(in.n_isotopes, in.n_gridpoints, nuclide_grids, energy_grid, in.grid_type);
if( mype == 0 ) printf("Binary file \"XS_data.dat\" written! Exiting...\n");
return 0;
#endif
// =====================================================================
// Cross Section (XS) Parallel Lookup Simulation Begins
// =====================================================================
// Outer benchmark loop can loop through all possible # of threads
#ifdef BENCHMARK
for( int bench_n = 1; bench_n <=omp_get_num_procs(); bench_n++ )
{
in.nthreads = bench_n;
omp_set_num_threads(in.nthreads);
#endif
if( mype == 0 )
{
printf("\n");
border_print();
center_print("SIMULATION", 79);
border_print();
}
CLOCKS_EMULATE
CACHE_BARRIER(NULL)
TRACE_START
STATS_START
tget(tstart);
//initialize papi with one thread (master) here
#ifdef PAPI
if ( PAPI_library_init(PAPI_VER_CURRENT) != PAPI_VER_CURRENT){
fprintf(stderr, "PAPI library init error!\n");
exit(1);
}
#endif
// OpenMP compiler directives - declaring variables as shared or private
#if defined(_OPENMP)
#pragma omp parallel default(none) \
private(i, thread, p_energy, mat, seed) \
shared( in, energy_grid, nuclide_grids, \
mats, concs, num_nucs, mype, vhash)
#endif
{
// Initialize parallel PAPI counters
#ifdef PAPI
int eventset = PAPI_NULL;
int num_papi_events;
#if defined(_OPENMP)
#pragma omp critical
#endif
{
counter_init(&eventset, &num_papi_events);
}
#endif
double macro_xs_vector[5];
// NOTE(review): 'xs' is never freed; per-thread block lives until exit.
double * xs = (double *) calloc(5, sizeof(double));
// Initialize RNG seeds for threads
thread = omp_get_thread_num();
seed = (thread+1)*19+17;
// XS Lookup Loop
#if defined(_OPENMP)
#pragma omp for schedule(dynamic)
#endif
for( i = 0; i < in.lookups; i++ )
{
// Status text
if( INFO && mype == 0 && thread == 0 && i % 1000 == 0 )
printf("\rCalculating XS's... (%.0lf%% completed)",
(i / ( (double)in.lookups / (double) in.nthreads ))
/ (double) in.nthreads * 100.0);
// Randomly pick an energy and material for the particle
#ifdef VERIFICATION
#if defined(_OPENMP)
#pragma omp critical
#endif
{
p_energy = rn_v();
mat = pick_mat(&seed);
}
#else
p_energy = rn(&seed);
mat = pick_mat(&seed);
#endif
// debugging
//printf("E = %lf mat = %d\n", p_energy, mat);
// This returns the macro_xs_vector, but we're not going
// to do anything with it in this program, so return value
// is written over.
calculate_macro_xs( p_energy, mat, in.n_isotopes,
in.n_gridpoints, num_nucs, concs,
energy_grid, nuclide_grids, mats,
macro_xs_vector, in.grid_type, in.hash_bins );
// Copy results from above function call onto heap
// so that compiler cannot optimize function out
// (only occurs if -flto flag is used)
memcpy(xs, macro_xs_vector, 5*sizeof(double));
// Verification hash calculation
// This method provides a consistent hash accross
// architectures and compilers.
#ifdef VERIFICATION
char line[256];
sprintf(line, "%.5lf %d %.5lf %.5lf %.5lf %.5lf %.5lf",
p_energy, mat,
macro_xs_vector[0],
macro_xs_vector[1],
macro_xs_vector[2],
macro_xs_vector[3],
macro_xs_vector[4]);
// NOTE(review): hash() is passed a length of 10000 but 'line' is only
// 256 bytes -- unless hash() stops at the NUL terminator this reads
// past the buffer. Confirm hash()'s contract or pass strlen(line).
unsigned long long vhash_local = hash((unsigned char *)line, 10000);
#if defined(_OPENMP)
#pragma omp atomic
#endif
vhash += vhash_local;
#endif
}
// Prints out thread local PAPI counters
#ifdef PAPI
if( mype == 0 && thread == 0 )
{
printf("\n");
border_print();
center_print("PAPI COUNTER RESULTS", 79);
border_print();
printf("Count \tSmybol \tDescription\n");
}
{
#if defined(_OPENMP)
#pragma omp barrier
#endif
}
counter_stop(&eventset, num_papi_events);
#endif
}
#ifndef PAPI
if( mype == 0 )
{
printf("\n" );
printf("Simulation complete.\n" );
}
#endif
cache_flush(); /* flush all */
tget(tend);
CACHE_BARRIER(NULL)
STATS_STOP
TRACE_STOP
CLOCKS_NORMAL
// Print / Save Results and Exit
print_results( in, mype, tesec(tend,tstart), nprocs, vhash );
STATS_PRINT
#ifdef BENCHMARK
}
#endif
#ifdef MPI
MPI_Finalize();
#endif
TRACE_CAP
return 0;
}
|
GB_binop__iseq_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_uint16
// A.*B function (eWiseMult): GB_AemultB__iseq_uint16
// A*D function (colscale): GB_AxD__iseq_uint16
// D*A function (rowscale): GB_DxB__iseq_uint16
// C+=B function (dense accum): GB_Cdense_accumB__iseq_uint16
// C+=b function (dense accum): GB_Cdense_accumb__iseq_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint16
// C=scalar+B GB_bind1st__iseq_uint16
// C=scalar+B' GB_bind1st_tran__iseq_uint16
// C=A+scalar GB_bind2nd__iseq_uint16
// C=A'+scalar GB_bind2nd_tran__iseq_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT16 || GxB_NO_ISEQ_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense.  Auto-generated wrapper: the loop
// body lives in the shared template, specialized by the GB_* macros above
// (here: cij = (aij == bij) on uint16_t).
GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.  The *_slice
// arrays describe the parallel partition of B's entries into ntasks tasks.
// Auto-generated wrapper around the subassign-23 template.
GrB_Info GB_Cdense_accumB__iseq_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed type-erased via p_bwork) into a
// dense matrix C.  Auto-generated wrapper around the subassign-22 template.
GrB_Info GB_Cdense_accumb__iseq_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block already returned.  A quirk
// of the code generator; harmless.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.  Auto-generated
// wrapper; the parallel loop is in the colscale meta-template.
GrB_Info GB_AxD__iseq_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.  Auto-generated
// wrapper; the parallel loop is in the rowscale meta-template.
GrB_Info GB_DxB__iseq_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B (union of patterns).  The slice pointers
// are workspace allocated inside the template and released by GB_FREE_ALL.
// Auto-generated wrapper around the add template.
GrB_Info GB_AaddB__iseq_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (intersection of patterns).  Same
// workspace/cleanup pattern as GB_AaddB above.  Auto-generated wrapper
// around the emult template.
GrB_Info GB_AemultB__iseq_uint16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry of B present in the bitmap Bb,
// binding the scalar x as the first operand of the ISEQ operator.
GrB_Info GB_bind1st__iseq_uint16
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // entries absent from the bitmap are left untouched
        if (GBB (Bb, p))
        {
            Cx [p] = (x == Bx [p]) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry of A present in the bitmap Ab,
// binding the scalar y as the second operand of the ISEQ operator.
GrB_Info GB_bind2nd__iseq_uint16
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // entries absent from the bitmap are left untouched
        if (GBB (Ab, p))
        {
            Cx [p] = (Ax [p] == y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply cij = (x == aij), scalar bound as
// the first operand.  The transpose loop comes from GB_unop_transpose.c,
// driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__iseq_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply cij = (aij == y), scalar bound as
// the second operand.  Uses the GB_CAST_OP macro defined just above this
// function together with the shared transpose template.
GrB_Info GB_bind2nd_tran__iseq_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
matrix.c | /*****************************************************************************
*
* Elmer, A Finite Element Software for Multiphysical Problems
*
* Copyright 1st April 1995 - , CSC - IT Center for Science Ltd., Finland
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library (in file ../LGPL-2.1); if not, write
* to the Free Software Foundation, Inc., 51 Franklin Street,
* Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
/*******************************************************************************
*
* MATC matrix utilities.
*
*******************************************************************************
*
* Author: Juha Ruokolainen
*
* Address: CSC - IT Center for Science Ltd.
* Keilaranta 14, P.O. BOX 405
* 02101 Espoo, Finland
* Tel. +358 0 457 2723
* Telefax: +358 0 457 2302
* EMail: Juha.Ruokolainen@csc.fi
*
* Date: 30 May 1996
*
* Modified by:
*
* Date of modification:
*
******************************************************************************/
/***********************************************************************
|
| MATRIX.C - Last Edited 8. 8. 1988
|
***********************************************************************/
/*======================================================================
|Syntax of the manual pages:
|
|FUNCTION NAME(...) params ...
|
$ usage of the function and type of the parameters
? explane the effects of the function
= return value and the type of value if not of type int
@ globals effected directly by this routine
! current known bugs or limitations
& functions called by this function
~ these functions may interest you as an alternative function or
| because they control this function somehow
^=====================================================================*/
/*
* $Id: matrix.c,v 1.1.1.1 2005/04/14 13:29:14 vierinen Exp $
*
* $Log: matrix.c,v $
* Revision 1.1.1.1 2005/04/14 13:29:14 vierinen
* initial matc automake package
*
* Revision 1.2 1998/08/01 12:34:50 jpr
*
* Added Id, started Log.
*
*
*/
#include <math.h>

#include "elmer/matc.h"
#define MA(i,j) a[(i) * ncola + (j)]
#define MB(i,j) b[(i) * ncolb + (j)]
#define MC(i,j) c[(i) * ncolc + (j)]
/*
 * Absolute value of a double.
 * Uses fabs() from <math.h>: the previous abs() call truncates the argument
 * to int if the C library's integer abs() is the one in scope, silently
 * dropping the fractional part.  fabs() is correct for all doubles and
 * matches the matc.h abs() macro (if that is what was intended) exactly.
 */
double func_abs(arg)
double arg;
{
   return fabs(arg);
}
/*
 * mod(x,y): integer remainder after rounding both operands to int.
 * Rounding is done by adding 0.5 and truncating, which is round-to-nearest
 * only for non-negative inputs.
 * NOTE(review): negative x or y rounds incorrectly (e.g. -1.4 -> 0), and if
 * y rounds to 0 the '%' is a division by zero (undefined behavior).  Looks
 * like the intended domain is positive values -- confirm against callers.
 */
double func_mod(x,y)
double x,y;
{
int ix, iy;
ix = x + 0.5;
iy = y + 0.5;
return (double)(ix % iy);
}
/*
 * sum(matrix): for a vector argument return the scalar sum of its elements;
 * for a matrix return a row vector of column sums.
 * Relies on var_temp_new() zero-initializing the result storage.
 */
VARIABLE *mtr_sum(A) VARIABLE *A;
{
   VARIABLE *R;
   int row, col;
   int rows = NROW(A), cols = NCOL(A);
   double *src = MATR(A), *dst;

   if (rows == 1 || cols == 1)
   {
     int len = (rows == 1) ? cols : rows;
     R = var_temp_new(TYPE_DOUBLE, 1, 1); dst = MATR(R);
     for(row = 0; row < len; row++) *dst += src[row];
   }
   else
   {
     R = var_temp_new(TYPE_DOUBLE, 1, cols); dst = MATR(R);
     for(col = 0; col < cols; col++)
       for(row = 0; row < rows; row++) dst[col] += src[row * cols + col];
   }

   return R;
}
VARIABLE *mtr_trace(A) VARIABLE *A;
{
VARIABLE *C;
double temp = 0.0;
int i;
int nrowa = NROW(A), ncola = NCOL(A);
double *a = MATR(A);
if (nrowa != ncola) error("trace: not square.\n");
for(i = 0; i < nrowa; i++) temp += MA(i,i);
C = var_temp_new(TYPE(A), 1, 1); *MATR(C) = temp;
return C;
}
VARIABLE *mtr_zeros(A) VARIABLE *A;
{
VARIABLE *C;
int ind1 = 1, ind2 = 1;
if (NEXT(A) != NULL)
{
ind1 = (int)*MATR(A); ind2 = (int)*MATR(NEXT(A));
}
else
{
ind2 = (int)*MATR(A);
}
if (ind1 < 1 || ind2 < 1)
error("Zeros: invalid size for and array");
C = var_temp_new(TYPE_DOUBLE, ind1, ind2);
return C;
}
/*
 * ones(n[,m]): matrix of ones.  Delegates size handling (and its argument
 * validation) to mtr_zeros(), then overwrites every element with 1.0.
 */
VARIABLE *mtr_ones(A) VARIABLE *A;
{
  VARIABLE *R = mtr_zeros(A);
  double *out = MATR(R);
  int k, count = NROW(R) * NCOL(R);

  for(k = 0; k < count; k++) out[k] = 1.0;

  return R;
}
/*
 * rand(n[,m]): n by m matrix of uniform random numbers from urand().
 * The seed is per-thread (OpenMP threadprivate) and lazily initialized
 * from the wall clock on each thread's first call; 0 means "unseeded".
 */
VARIABLE *mtr_rand(A) VARIABLE *A;
{
VARIABLE *C;
static int seed = 0;
#pragma omp threadprivate (seed)
int i, n;
double *c;
C = mtr_zeros(A); c = MATR(C);
n = NROW(C) * NCOL(C);
if (seed == 0) seed = time(NULL);
for(i = 0; i < n; i++) *c++ = urand(&seed);
return C;
}
VARIABLE *mtr_resize(A) VARIABLE *A;
{
VARIABLE *C;
int i, j, n, m, ind1 = 1, ind2;
double *a = MATR(A), *c;
if (NEXT(NEXT(A)) != NULL)
{
ind1 = *MATR(NEXT(A)); ind2 = *MATR(NEXT(NEXT(A)));
}
else
{
ind2 = (int)*MATR(NEXT(A));;
}
if (ind1 < 1 || ind2 < 1)
error("resize: invalid size for and array");
C = var_temp_new(TYPE(A), ind1, ind2); c = MATR(C);
a = MATR(A); n = ind1 * ind2; m = NROW(A) * NCOL(A);
for(i = j = 0; i < n; i++)
{
*c++ = a[j++]; if (j == m) j = 0;
}
return C;
}
/*
 * vector(start,stop[,incr]): row vector start, start+incr, ... up to stop.
 * If incr is omitted or exactly zero it defaults to +1 or -1 depending on
 * the direction from start to stop.
 * NOTE(review): 'abs' here must be the matc.h macro (valid for doubles);
 * the C library's integer abs() would truncate -- confirm matc.h defines it.
 * NOTE(review): 'incr == 0' is an exact floating-point compare; it only
 * catches a literal zero increment, which appears intentional.
 */
VARIABLE *mtr_vector(A) VARIABLE *A;
{
VARIABLE *C;
double start, stop, incr, x, *c;
int i, eval;
start = *MATR(A); stop = *MATR(NEXT(A));
if (NEXT(NEXT(A)) != (VARIABLE *)NULL)
incr = *MATR(NEXT(NEXT(A)));
else
incr = (start < stop) ? (1) : (-1);
if (incr == 0)
incr = (start < stop) ? (1) : (-1);
/* number of points, inclusive of the start value */
eval = (int)(abs(stop-start) / abs(incr)) + 1;
if (eval < 1) return NULL;
C = var_temp_new(TYPE_DOUBLE, 1, eval); c = MATR(C);
x = start;
for(i = 0; i < eval; i++)
{
*c++ = x; x += incr;
}
return C;
}
/*
 * eye(n): n by n identity matrix.  Off-diagonal elements are the zeroes
 * supplied by var_temp_new().
 */
VARIABLE *mtr_eye(A) VARIABLE *A;
{
  VARIABLE *R;
  double *vals;
  int k, n;

  if (*MATR(A) < 1)
  {
    error("eye: Invalid size for an array.\n");
  }

  n = (int)*MATR(A);
  R = var_temp_new(TYPE_DOUBLE, n, n);
  vals = MATR(R);
  for (k = 0; k < n; k++) vals[k * n + k] = 1.0;

  return R;
}
/*
 * size(matrix): 1 by 2 vector holding (rows, columns) of the argument.
 */
VARIABLE *mtr_size(A) VARIABLE *A;
{
  VARIABLE *R;
  double *out;

  R = var_temp_new(TYPE_DOUBLE, 1, 2);
  out = MATR(R);
  out[0] = NROW(A);
  out[1] = NCOL(A);

  return R;
}
/*
 * min(matrix): for a vector argument return its smallest element as a
 * scalar; for a matrix return a row vector of per-column minima.
 */
VARIABLE *mtr_min(A) VARIABLE *A;
{
  VARIABLE *R;
  double *src = MATR(A), *out;
  int rows = NROW(A), cols = NCOL(A);
  int r, c;

  if (rows == 1 || cols == 1)
  {
    int len = max(rows, cols);
    R = var_temp_new(TYPE_DOUBLE, 1, 1); out = MATR(R);
    *out = src[0];
    for(r = 1; r < len; r++) *out = min(*out, src[r]);
  }
  else
  {
    R = var_temp_new(TYPE_DOUBLE, 1, cols); out = MATR(R);
    for(c = 0; c < cols; c++)
    {
      out[c] = src[c];
      for(r = 1; r < rows; r++) out[c] = min(out[c], src[r * cols + c]);
    }
  }

  return R;
}
/*
 * max(matrix): for a vector argument return its largest element as a
 * scalar; for a matrix return a row vector of per-column maxima.
 */
VARIABLE *mtr_max(A) VARIABLE *A;
{
  VARIABLE *R;
  double *src = MATR(A), *out;
  int rows = NROW(A), cols = NCOL(A);
  int r, c;

  if (rows == 1 || cols == 1)
  {
    int len = max(rows, cols);
    R = var_temp_new(TYPE_DOUBLE, 1, 1); out = MATR(R);
    *out = src[0];
    for(r = 1; r < len; r++) *out = max(*out, src[r]);
  }
  else
  {
    R = var_temp_new(TYPE_DOUBLE, 1, cols); out = MATR(R);
    for(c = 0; c < cols; c++)
    {
      out[c] = src[c];
      for(r = 1; r < rows; r++) out[c] = max(out[c], src[r * cols + c]);
    }
  }

  return R;
}
/*
 * diag(x): for a vector argument build a square matrix with x on the
 * diagonal (zeroes elsewhere); for a matrix argument return its diagonal
 * as a row vector.
 * NOTE(review): for a non-square matrix the result vector is allocated
 * with nrowa columns but only min(nrowa,ncola) entries are filled (the
 * rest stay zero) -- confirm whether the trailing zeroes are intended.
 */
VARIABLE *mtr_diag(A) VARIABLE *A;
{
VARIABLE *C;
double *a = MATR(A), *c;
int nrowa = NROW(A), ncola = NCOL(A);
int ncolc;
int i;
if (nrowa == 1 || ncola == 1)
{
nrowa = max(nrowa, ncola); ncolc = nrowa;
C = var_temp_new(TYPE_DOUBLE, nrowa, nrowa); c = MATR(C);
for(i = 0; i < nrowa; i++) MC(i, i) = *a++;
}
else
{
C = var_temp_new(TYPE_DOUBLE, 1, nrowa); c = MATR(C);
for(i = 0; i < min(nrowa,ncola); i++) *c++ = MA(i, i);
}
return C;
}
/*
 * pow(x,y): raise every element of matrix x to the scalar power y
 * (y is taken from element (0,0) of the second argument).
 */
VARIABLE *mtr_pow(A) VARIABLE *A;
{
  VARIABLE *expArg = NEXT(A), *R;
  double *base = MATR(A), exponent = M(expArg,0,0), *out;
  int rows = NROW(A), cols = NCOL(A);
  int k, count = rows * cols;

  R = var_temp_new(TYPE_DOUBLE, rows, cols);
  out = MATR(R);
  for(k = 0; k < count; k++) out[k] = pow(base[k], exponent);

  return R;
}
/*
 * where(l): linear (row-major) indices of the nonzero elements of l,
 * returned as a row vector.  Two passes: count hits, then record them.
 */
VARIABLE *mtr_where(A) VARIABLE *A;
{
  VARIABLE *R;
  double *vals = MATR(A), *out;
  int total = NROW(A) * NCOL(A);
  int k, hits = 0;

  for(k = 0; k < total; k++) if (vals[k]) hits++;

  R = var_temp_new(TYPE_DOUBLE, 1, hits);
  out = MATR(R);
  for(k = 0; k < total; k++) if (vals[k]) { *out++ = k; }

  return R;
}
/*
 * Register all MATC matrix built-ins (names, handlers, arg counts and
 * interactive help text) with the command interpreter via com_init().
 * Fixes to user-facing help strings only: minHelp had an unbalanced
 * parenthesis ("min(min(matrix)"), and randHelp read "initialized to
 * with random number" -- both corrected.  No registration changed.
 */
void mtr_com_init()
{
  static char *minHelp =
  {
     "r = min(matrix)\n"
     "Return value is a vector containing smallest element in columns of given matrix.\n"
     "r=min(min(matrix)) gives smallest element of the matrix.\n\n"
  };
  static char *maxHelp =
  {
     "r = max(matrix)\n"
     "Return value is a vector containing largest element in columns of given matrix.\n"
     "r=max(max(matrix)) gives largest element of the matrix.\n\n"
  };
  static char *sumHelp =
  {
     "r = sum(matrix)\n"
     "Return vector is column sums of given matrix. r=sum(sum(matrix)) gives\n"
     "the total sum of elements of the matrix.\n\n"
  };
  static char *traceHelp =
  {
     "r = trace(matrix)\n"
     "Return value is sum of matrix diagonal elements.\n\n"
  };
  static char *detHelp =
  {
     "r = det(matrix)\n"
     "Return value is determinant of given square matrix.\n\n"
  };
  static char *invHelp =
  {
     "r = inv(matrix)\n"
     "Invert given square matrix. Computed also by r=matrix^(-1).\n\n"
  };
  static char *eigHelp =
  {
     "r = eig(matrix)\n"
     "Return eigenvalues of given square matrix. r(n,0) is real part of the\n"
     "n:th eigenvalue, r(n,1) is the imaginary part respectively\n\n"
  };
  static char *jacobHelp =
  {
     "r = jacob(a,b,eps)\n"
     "Solve symmetric positive definite eigenvalue problem by Jacob iteration.\n"
     "Return values are the eigenvalues. Also a variable eigv is created containing\n"
     "eigenvectors.\n\n"
  };
  static char *ludHelp =
  {
     "r = lud(matrix)\n"
     "Return value is lud decomposition of given matrix.\n\n"
  };
  static char *hesseHelp =
  {
     "r = hesse(matrix)\n"
     "Return the upper hessenberg form of given matrix.\n\n"
  };
  static char *eyeHelp =
  {
     "r = eye(n)\n"
     "Return n by n identity matrix.\n\n"
  };
  static char *zerosHelp =
  {
     "r = zeros(n,m)\n"
     "Return n by m matrix with elements initialized to zero.\n"
  };
  static char *onesHelp =
  {
     "r = ones(n,m)\n"
     "Return n by m matrix with elements initialized to one.\n"
  };
  static char *randHelp =
  {
     "r = rand(n,m)\n"
     "Return n by m matrix with elements initialized with random numbers from\n"
     "zero to one.\n\n"
  };
  static char *diagHelp =
  {
     "r=diag(matrix) or r=diag(vector)\n"
     "Given matrix return diagonal entries as a vector. Given vector return matrix\n"
     "with diagonal elements from vector. r=diag(diag(a)) gives matrix with diagonal\n"
     "elements from matrix a otherwise elements are zero.\n\n"
  };
  static char *vectorHelp =
  {
     "r=vector(start,end,inc)\n"
     "Return vector of values going from start to end by inc.\n\n"
  };
  static char *sizeHelp =
  {
     "r = size(matrix)\n"
     "Return size of given matrix.\n"
  };
  static char *resizeHelp =
  {
     "r = resize(matrix,n,m)\n"
     "Make a matrix to look as a n by m matrix.\n\n"
  };
  static char *whereHelp =
  {
     "r = where(l)\n"
     "Return linear indexes of where l is true.\n\n"
  };

  /* scalar math functions are dispatched elementwise by the interpreter */
  com_init( "sin"  , TRUE, TRUE, (VARIABLE *(*)())sin  , 1, 1, "r=sin(x)" );
  com_init( "cos"  , TRUE, TRUE, (VARIABLE *(*)())cos  , 1, 1, "r=cos(x)" );
  com_init( "tan"  , TRUE, TRUE, (VARIABLE *(*)())tan  , 1, 1, "r=tan(x)" );
  com_init( "asin" , TRUE, TRUE, (VARIABLE *(*)())asin , 1, 1, "r=asin(x)" );
  com_init( "acos" , TRUE, TRUE, (VARIABLE *(*)())acos , 1, 1, "r=acos(x)" );
  com_init( "atan" , TRUE, TRUE, (VARIABLE *(*)())atan , 1, 1, "r=atan(x)" );
  com_init( "atan2" , TRUE, TRUE, (VARIABLE *(*)())atan2 , 2, 2, "r=atan2(y,x)" );
  com_init( "sinh" , TRUE, TRUE, (VARIABLE *(*)())sinh , 1, 1, "r=sinh(x)" );
  com_init( "cosh" , TRUE, TRUE, (VARIABLE *(*)())cosh , 1, 1, "r=cosh(x)" );
  com_init( "tanh" , TRUE, TRUE, (VARIABLE *(*)())tanh , 1, 1, "r=tanh(x)" );
  com_init( "exp"  , TRUE, TRUE, (VARIABLE *(*)())exp  , 1, 1, "r=exp(x)" );
  com_init( "ln"   , TRUE, TRUE, (VARIABLE *(*)())log  , 1, 1, "r=ln(x)\nNatural logarithm." );
  com_init( "log"  , TRUE, TRUE, (VARIABLE *(*)())log10 , 1, 1, "r=log(x)\nBase 10 logarithm." );
  com_init( "sqrt" , TRUE, TRUE, (VARIABLE *(*)())sqrt , 1, 1, "r=sqrt(x)" );
  com_init( "ceil" , TRUE, TRUE, (VARIABLE *(*)())ceil , 1, 1, "r=ceil(x)\nSmallest integer not less than x." );
  com_init( "floor" , TRUE, TRUE, (VARIABLE *(*)())floor , 1, 1, "r=floor(x)\nLargest integer not more than x." );
  com_init( "abs"  , TRUE, TRUE, (VARIABLE *(*)())func_abs , 1, 1,"r=abs(x)");
  com_init( "mod"  , TRUE, TRUE, (VARIABLE *(*)())func_mod , 2, 2,"r=mod(x,y)");
  /* matrix-level built-ins take and return VARIABLE lists */
  com_init( "pow"  , FALSE, TRUE, mtr_pow, 2, 2, "r=pow(x,y)" );
  com_init( "min"  , FALSE, TRUE, mtr_min, 1, 1, minHelp );
  com_init( "max"  , FALSE, TRUE, mtr_max, 1, 1, maxHelp );
  com_init( "sum"  , FALSE, TRUE, mtr_sum, 1, 1, sumHelp );
  com_init( "trace" , FALSE, TRUE, mtr_trace, 1, 1, traceHelp );
  com_init( "det"  , FALSE, TRUE, mtr_det, 1, 1, detHelp );
  com_init( "inv"  , FALSE, TRUE, mtr_inv, 1, 1, invHelp );
  com_init( "eig"  , FALSE, TRUE, mtr_eig, 1, 1, eigHelp );
  com_init( "jacob" , FALSE, TRUE, mtr_jacob, 3, 3, jacobHelp );
  com_init( "lud"  , FALSE, TRUE, mtr_LUD, 1, 1, ludHelp );
  com_init( "hesse" , FALSE, TRUE, mtr_hesse, 1, 1, hesseHelp );
  com_init( "eye"  , FALSE, TRUE, mtr_eye, 1, 1, eyeHelp );
  com_init( "zeros" , FALSE, TRUE, mtr_zeros, 1, 2, zerosHelp );
  com_init( "ones" , FALSE, TRUE, mtr_ones, 1, 2, onesHelp );
  com_init( "rand" , FALSE, FALSE, mtr_rand, 1, 2, randHelp );
  com_init( "diag" , FALSE, TRUE, mtr_diag, 1, 1, diagHelp );
  com_init( "vector" , FALSE, TRUE, mtr_vector, 2, 3, vectorHelp );
  com_init( "size" , FALSE, TRUE, mtr_size, 1, 1, sizeHelp );
  com_init( "resize" , FALSE, TRUE, mtr_resize, 2, 3, resizeHelp );
  com_init( "where" , FALSE, FALSE, mtr_where, 1, 1, whereHelp );
}
|
OMP-Jacobi-1D-Sliced-Diamond-Tiling.test.c | //#include "jacobi1d.h"
//#include "trapezoidTiling.h"
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <ctype.h>
//#include "trapezoidTiling.h"
//#include "jacobi1d.h"
#include <stdbool.h>
#include <assert.h>
//#include "jacobi1d.h"
#include <stdbool.h>
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#define stencil(read,write,x) space[write][x] = (space[read][x-1] + space[read][x] + space[read][x+1])/3;
bool initedJacobi = false;
int globalSeed = -1;
int cores = -1;
int problemSize = -1, T = -1, lowerBound = -1, upperBound = -1;
double* space[2] = { NULL, NULL }; // space[t][x] for (t,x) in { {0,1} X {lowerBound, ... , upperBound} };
/* Return the smaller of two ints (ties return the first argument). */
int min( int a, int b){
    if( b < a ){
        return b;
    }
    return a;
}
/* Return the larger of two ints (ties return the first argument). */
int max( int a, int b){
    if( b > a ){
        return b;
    }
    return a;
}
/*
void stencil( int read, int write, int x ){
// stencil operation
space[write][x] = (space[read][x-1] + space[read][x] + space[read][x+1])/3;
}
*/
/* One-time global setup for the Jacobi run.
 * Globals left at -1 were not set on the command line and receive a
 * default here; a second call is a no-op so command-line overrides and
 * the seed survive. Also pins the OpenMP thread count to `cores`. */
void initJacobi(){
    if( initedJacobi ){
        return;
    }
    // -1 means "not supplied via a cmd line flag": fill in the default.
    if( globalSeed == -1 ){
        // one shared seed so every initSpace() call generates the same data
        globalSeed = time(NULL);
    }
    if( T == -1 ){
        T = 100;
    }
    if( problemSize == -1 ){
        problemSize = 1000000;
    }
    // interior cells are [lowerBound, upperBound]; index 0 is the halo
    lowerBound = 1;
    upperBound = lowerBound + problemSize - 1;
    if( cores == -1 ){
        cores = omp_get_num_procs();
    }
    omp_set_num_threads( cores );
    // mark initialization as done
    initedJacobi = true;
}
// initialize space array
/* (Re)allocate and seed the two time-step buffers.
 * space[0] gets deterministic pseudo-random interior values (srand with the
 * shared globalSeed, so repeated calls reproduce the same data — verifyResult
 * depends on this); halo cells at index 0 and upperBound+1 are zeroed in
 * both buffers. Exits on allocation failure. */
void initSpace(){
    // if space has been previously allocated, release it first (no leak on re-init)
    if( space[0] != NULL ){
        free( space[0] );
    }
    if( space[1] != NULL ) {
        free( space[1] );
    }
    // allocate time-steps 0 and 1; +2 leaves room for the two halo cells
    space[0] = (double*) malloc( (problemSize + 2) * sizeof(double));
    space[1] = (double*) malloc( (problemSize + 2) * sizeof(double));
    if( space[0] == NULL || space[1] == NULL ){
        printf( "Could not allocate space array\n" );
        exit(0);
    }
    // use global seed to seed the random number gen (will be constant)
    srand(globalSeed);
    // seed the space.
    int x;
    for( x = lowerBound; x <= upperBound; ++x ){
        // BUGFIX: was rand() / (double)rand() — rand() may return 0, producing
        // inf cells. Normalize by RAND_MAX into [0,1] instead; the program only
        // self-verifies against the same seeded data, so any fixed formula works.
        space[0][x] = rand() / (double)RAND_MAX;
    }
    // set halo values (sanity)
    space[0][0] = 0;
    space[0][upperBound+1] = 0;
    space[1][0] = 0;
    space[1][upperBound+1] = 0;
}
// parse int abstraction from strtol
/* Decimal string -> int; thin wrapper over strtol (returns 0 on no digits). */
int parseInt( char* string ){
    long value = strtol( string, NULL, 10 );
    return (int) value;
}
/* Check the tiled result against a serial reference run.
 * Snapshots the final time-step buffer (space[T & 1]), rebuilds the seeded
 * initial state with initSpace(), re-runs the stencil serially, and compares
 * element-by-element. Returns true on an exact match; prints SUCCESS/FAILED
 * when verbose. */
bool verifyResult( bool verbose ){
    assert( space[0] != NULL && space[1] != NULL );
    double* endSpace = (double*) malloc( (problemSize + 2) * sizeof(double) );
    // BUGFIX: the allocation was previously unchecked — NULL deref on OOM.
    if( endSpace == NULL ){
        printf( "Could not allocate space array\n" );
        exit(0);
    }
    // snapshot the buffer that holds time-step T (T & 1 picks the buffer
    // written on the final step)
    for( int x = 0; x < problemSize + 2; ++x ){
        endSpace[x] = space[T & 1][x];
    }
    // regenerate the identical initial data (same globalSeed)
    initSpace();
    int read = 0, write = 1;
    for( int t = 1; t <= T; ++t ){
        for( int x = lowerBound; x <= upperBound; ++x ){
            stencil(read, write, x);
        }
        // flip buffers for the next time step
        read = write;
        write = 1 - write;
    }
    bool failed = false;
    for( int x = lowerBound; x <= upperBound; ++x ){
        if( endSpace[x] != space[T & 1][x] ){
            failed = true;
            if( verbose ) printf( "FAILED\n");
            break;
        }
    }
    if( verbose && !failed ) printf( "SUCCESS\n" );
    free( endSpace );
    return !failed;
}
bool initedTrapezoid = false;
int timeBand = -1, width_max = -1, width_min = -1;
int tiles_A_start = -1, tiles_B_start = -1, betweenTiles = -1;
int count_A_tiles = 0, count_B_tiles = 0;
int A_tiles_per_core = 0, B_tiles_per_core = 0;
/* Count how many tiles of the given type fit in the problem domain.
 * type: 0/'a'/'A' counts A-tiles (from tiles_A_start), 1/'b'/'B' counts
 * B-tiles (from tiles_B_start); anything else counts 0.
 * Uses the closed form of "for( x0 = start; x0 <= upperBound;
 * x0 += betweenTiles ) ++count" — O(1) instead of O(domain/stride) — and
 * guards a nonpositive stride, which previously looped forever. */
int countTiles( char type ){
    int start;
    if( type == 0 || type == 'a' || type == 'A' ){
        start = tiles_A_start;
    } else if( type == 1 || type == 'b' || type == 'B' ){
        start = tiles_B_start;
    } else {
        return 0; // unknown type: nothing to count (matches old behavior)
    }
    if( betweenTiles <= 0 || start > upperBound ){
        return 0;
    }
    return (upperBound - start) / betweenTiles + 1;
}
void initTrapezoid(){
// Derive the diamond/trapezoid tile geometry. Requires initJacobi() to have
// run first (uses lowerBound and cores); runs at most once.
if( initedJacobi && !initedTrapezoid ){
// note the convention someVar = ( someVar == -1 )? defaultValue : someVar ;
// this allows us to use the cmd line flags to set variables, AND have an init call.
// all values are initialized with -1 in global space, so if someVar == -1, then it has
// not been set, and can be given a default value.
// tile size parameters
timeBand = (timeBand == -1)? 100 : timeBand;
width_max = (width_max == -1)? 54701 : width_max ;
// narrow (B) tile width so A and B tiles tessellate the slab:
// simplifies to width_max - 2*timeBand + 1
width_min = (width_max + -1 * timeBand) - (0 + 1 * timeBand) +1;
tiles_A_start = lowerBound - timeBand + 1; // starting point for doing 'A' tiles loops
tiles_B_start = tiles_A_start + width_max; // starting point for doing 'B' tiles loop
betweenTiles = width_min + width_max; // width between the first x0 point and next x0 point
// assert that this is a valid tile (B tiles must have positive width)
assert( width_min >= 1 && width_max >= width_min );
count_A_tiles = countTiles( 'a' );
count_B_tiles = countTiles( 'b' );
// chunk sizes for the dynamic schedules in test_1 (at least 1 tile per chunk)
A_tiles_per_core = max( 1, count_A_tiles / cores );
B_tiles_per_core = max( 1, count_B_tiles / cores );
//printf("count A tiles: %d\n", count_A_tiles );
//printf("count B tiles: %d\n", count_A_tiles );
// set initialization flag
initedTrapezoid = true;
}
}
// Parallel Tiling
// Run the full sliced-diamond tiled Jacobi sweep over all T time steps and
// return the elapsed wall-clock seconds. For each band of `timeBand` time
// steps: wide "A" tiles (x-range shrinks as t advances: dx0=+1, dx1=-1) are
// processed in parallel, then narrow "B" tiles (x-range grows: dx0=-1,
// dx1=+1) fill the gaps between them. Tiles overlapping the domain edge are
// clamped to [lowerBound, upperBound]. Buffer parity is derived from t0 so
// every tile starts reading the buffer that holds time-step t0-1.
double test_1(){
double start_time = omp_get_wtime();
int write, read; // read and write buffers
int t0, t1, x0, x1, dx0, dx1; // most values of the tile tuples
int t, x; // indices into space (t,x)
// for all t0 in t0..T by timeBand
for( t0 = 1; t0 <= T; t0 += timeBand ) {
// set and clamp t1 from t0
t1 = min(t0 + timeBand - 1, T);
// Do A-tiles
// set dx0 and dx1 to correct A-tile values
dx0 = 1;
dx1 = -1;
// iterate over all x0 points for A-tiles
#pragma omp parallel for private( x0, x1, write, read, t, x) schedule(dynamic, A_tiles_per_core)
for( x0 = tiles_A_start; x0 <= upperBound; x0 += betweenTiles ){
x1 = x0 + width_max - 1; // set x1 from x0
// Set read and write buffer.
// this is equivalent to (t0 - 1) % 2 but assumed faster
read = (t0 - 1) & 1;
write = 1 - read;
// if x0 is at or below lower bound (left edge tile)
if( x0 <= lowerBound ) {
//printf("%d, %d, %d, %d, %d, %d\n", lowerBound, 0, x1, dx1, t0, t1 );
// for t in t0 ... t1
for( t = t0; t<= t1; ++t ){
//#pragma omp parallel for private( x ) schedule(static)
// for x in lowerBound ... x1'ish (right edge slopes inward by dx1 per step)
int minVal = min(x1 + dx1 * (t - t0), upperBound );
for( x = lowerBound; x <= minVal; ++x){
stencil( read, write, x ); // stencil computation
}// for x
// flip write buffer
read = write;
write = 1 - write;
}// for t
}// if x0 <= lowerBound
// if x1 is at or above upper bound (right edge tile)
else if( x1 >= upperBound ){
//printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, upperBound, 0, t0, t1 );
// for t in t0...t1
for( t = t0; t<= t1; ++t ){
//#pragma omp parallel for private( x ) schedule(static)
// for x in x0'ish ... upperbound (left edge slopes inward by dx0 per step)
for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= upperBound; ++x){
stencil( read, write, x ); // stencil computation
}// for x
// flip write buffer
read = write;
write = 1 - write;
}// for t
}// else if x1 >= upperBound
// otherwise regular ol' tile
else {
//printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, x1, dx1, t0, t1 );
// for t in t0 ... t1
for( t = t0; t<= t1; ++t ){
//#pragma omp parallel for private( x ) schedule(static)
// for x in x0'ish ... x1'ish (both edges slope inward: shrinking trapezoid)
int minVal = min(x1 + dx1 * (t - t0), upperBound );
for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= minVal; ++x){
stencil( read, write, x ); // stencil computation
}// for x
// flip write buffer
read = write;
write = 1 - write;
}// for t
}// else
}// for A-tiles
// Do B-tiles
// set dx0 and dx1 to correct B-tile values
dx0 = -1;
dx1 = 1;
// iterate over x0 points for B-tiles
#pragma omp parallel for private( x0, x1, write, read, t, x) schedule(dynamic,B_tiles_per_core)
for( x0 = tiles_B_start; x0 <= upperBound; x0 += betweenTiles ){
x1 = x0 + width_min - 1; // set x1 from x0
// Set write buffer.
// this is equivalent to (t0 - 1) % 2, but assumed faster
read = (t0 - 1) & 1;
write = 1 - read;
// if x1 is at or above upper bound (right edge tile)
if( x1 >= upperBound ){
//printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, upperBound, 0, t0, t1 );
// for t in t0 ... t1
for( t = t0; t <= t1; ++t ){
//#pragma omp parallel for private( x ) schedule(static)
// for x in x0'ish ... upper bound
for( x = max( x0 + dx0 * (t - t0), lowerBound); x <= upperBound; ++x){
stencil( read, write, x ); // stencil computation
}// for x
// flip write buffer
read = write;
write = 1 - write;
}// for t
}// if x1 >= upperBound
// regular ol' tile
else {
//printf("%d, %d, %d, %d, %d, %d\n", x0, dx0, x1, dx1, t0, t1 );
// for t in t0 ... t1
for( t = t0; t<= t1; ++t ){
//#pragma omp parallel for private( x ) schedule(static)
// for x in x0'ish ... x1'ish (both edges slope outward: growing trapezoid)
int minVal = min(x1 + dx1 * (t - t0), upperBound);
for( x = max(x0 + dx0 * (t - t0), lowerBound); x <= minVal; ++x){
stencil( read, write, x ); // stencil computation
}// for x
// flip write buffer
read = write;
write = 1 - write;
} // for t
}// else
} // for B-tiles
}// for t0
double end_time = omp_get_wtime();
return (end_time - start_time);
}
/* Entry point: parse flags, initialize the problem and tiling, run the tiled
 * sweep, and optionally verify against a serial reference run. */
int main( int argc, char* argv[] ){
    setbuf(stdout, NULL); // unbuffered stdout so debug prints ALWAYS appear
    bool verify = false;
    bool printtime = true;
    // Command line parsing.
    // BUGFIX: c must be int, not char — getopt() returns int and signals the
    // end of options with -1; where plain char is unsigned, (char)-1 != -1
    // and the loop never terminates correctly.
    int c;
    while ((c = getopt (argc, argv, "nc:s:p:T:t:w:hv")) != -1){
        switch( c ) {
        case 'n': // suppress the time printout
            printtime = false;
            break;
        case 'c': // cores
            cores = parseInt( optarg );
            if( cores <= 0 ){
                fprintf(stderr, "cores must be greater than 0: %d\n", cores);
                exit( 0 );
            }
            break;
        case 'p': // problem size
            problemSize = parseInt( optarg );
            if( problemSize <= 0 ){
                fprintf(stderr, "problemSize must be greater than 0: %d\n", problemSize);
                exit( 0 );
            }
            break;
        case 'T': // T (time steps)
            T = parseInt( optarg );
            if( T <= 0 ){
                fprintf(stderr, "T must be greater than 0: %d\n", T);
                exit( 0 );
            }
            break;
        case 't': // timeBand (tile height)
            timeBand = parseInt( optarg );
            if( timeBand <= 0 ){
                // BUGFIX: previously printed T instead of the offending value
                fprintf(stderr, "t must be greater than 0: %d\n", timeBand);
                exit( 0 );
            }
            break;
        case 'w': // width_max (tile width)
            width_max = parseInt( optarg );
            if( width_max <= 0 ){
                // BUGFIX: previously printed T instead of the offending value
                fprintf(stderr, "w must be greater than 0: %d\n", width_max);
                exit( 0 );
            }
            break;
        case 'h': // help
            printf("usage: %s\n-n \t dont print time \n-p <problem size> \t problem size in elements \n-T <time steps>\t number of time steps\n-c <cores>\tnumber of threads\n-w <tile width>\t the width of the tile\n-t <tile height>\t the number of timesteps in a tile\n-h\tthis dialogue\n-v\tverify output\n", argv[0]);
            exit(0);
        case 'v': // verify;
            verify = true;
            break;
        case '?':
            if (optopt == 'p')
                fprintf (stderr, "Option -%c requires positive int argument: problem size.\n", optopt);
            else if (optopt == 'T')
                fprintf (stderr, "Option -%c requires positive int argument: T.\n", optopt);
            else if (optopt == 's')
                fprintf (stderr, "Option -%c requires int argument: subset_s.\n", optopt);
            else if (optopt == 'c')
                fprintf (stderr, "Option -%c requires int argument: number of cores.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Unknown option `-%c'.\n", optopt);
            else
                fprintf(stderr, "Unknown option character `\\x%x'.\n", optopt);
            exit(0);
        default:
            exit(0);
        }
    }
    initJacobi();
    initTrapezoid();
    initSpace();
    // renamed from `time` to avoid shadowing time(3)
    double elapsed = test_1();
    if( printtime ){
        printf( "Time: %f\n", elapsed );
    }
    if( verify ){
        verifyResult( true );
    }
    return 0;
}
|
covariance.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Fill the data matrix with deterministic values and set the averaging
   divisor *float_n to 1.2.
   NOTE(review): the loops run over the compile-time extents M and N rather
   than the runtime parameters m and n — confirm intended (in PolyBench
   m == M and n == N by construction).
   The annotate attributes carry value-range hints for precision-tuning
   tooling; they do not change the computation. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final")));
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the m x m covariance matrix to stderr, one value at a time with a
   newline every 20 entries. Scanning all live-out data here both enables
   output checking and prevents dead-code elimination of the kernel. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Covariance kernel: three phases inside one parallel region; the implicit
   barrier at the end of each `omp for` orders the phases.
   1) column means, 2) mean-centering (mutates data in place), 3) the upper
   triangle of symmat = data^T * data, mirrored into the lower triangle.
   The mirror write symmat[j2][j1] is race-free: each j1 iteration writes a
   distinct column j1. */
static
void kernel_covariance(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m))
{
int i __attribute__((annotate("scalar(range(0, " PB_XSTR(N) ")) final")));
int j __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final")));
int j1 __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final")));
int j2 __attribute__((annotate("scalar(range(0, " PB_XSTR(M) ")) final")));
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel
{
#pragma omp for private (i)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Center the column vectors. */
#pragma omp for private (j)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
data[i][j] -= mean[j];
/* Calculate the m * m covariance matrix. */
#pragma omp for private (j2, i)
for (j1 = 0; j1 < _PB_M; j1++)
for (j2 = j1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += data[i][j1] * data[i][j2];
symmat[j2][j1] = symmat[j1][j2];
}
}
#pragma endscop
}
/* Driver: allocate the PolyBench arrays, initialize, time the covariance
   kernel, and print the live-out matrix through polybench_prevent_dce.
   The annotate attributes attach value-range hints for precision tuning. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE __attribute((annotate("target('float_n') scalar(range(0,8))"))) float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE __attribute((annotate("target('data') scalar(range(0,1000))"))),M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE __attribute((annotate("target('cov') scalar(range(0,1000000000))"))),M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE __attribute((annotate("target('mean') scalar(range(0,1000000))"))),M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
Example_SIMD.3.c | /*
* @@name: SIMD.3c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_4.0
*/
/* Sum a[i] + b[i] over i in [0, n). The loop is explicitly SIMD-vectorized:
 * each lane keeps a private temporary and the partial sums are combined
 * with a + reduction. */
double work( double *a, double *b, int n )
{
    double acc = 0.0;
    double t;
    int i;
    #pragma omp simd private(t) reduction(+:acc)
    for (i = 0; i < n; i++) {
        t = a[i] + b[i];
        acc += t;
    }
    return acc;
}
|
LinearSearch.c | #include "stdio.h"
#include "stdlib.h"
#include "omp.h"
#include "mpi.h"
FILE *fp;
void printArray(int *arr, int size);
/* Sequential scan: index of the first occurrence of x in arr[0..size-1],
 * or -1 when absent. */
int linearSearch(int x, int *arr, int size) {
    int idx = 0;
    while (idx < size) {
        if (arr[idx] == x) {
            return idx;
        }
        ++idx;
    }
    return -1;
}
/* OpenMP linear search: each thread records the first match it encounters
 * in its chunk; the smallest recorded index wins. Returns -1 if x is
 * absent (or on allocation failure).
 *
 * Fixes:
 *  - omp_get_num_threads() was called OUTSIDE any parallel region, where
 *    it always returns 1, so only one per-thread slot existed; use
 *    omp_get_max_threads() to size the array for the whole team.
 *  - the `#pragma omp for` directives were not enclosed in a
 *    `#pragma omp parallel` region, so the scan never actually ran in
 *    parallel; the scan is now wrapped in a parallel region.
 *  - unused variable `itr` removed; malloc result checked. */
int linearSearchOMP(int x, int *arr, int size) {
    int i, minIndex, threads = omp_get_max_threads();
    int *found = (int *)malloc(threads * sizeof(int));
    if (found == NULL)
        return -1;
    for (i = 0; i < threads; i++)
        found[i] = -1;
    #pragma omp parallel
    {
        int tid = omp_get_thread_num();
        #pragma omp for
        for (i = 0; i < size; i++) {
            if (arr[i] == x && found[tid] == -1)
                found[tid] = i;
        }
    }
    /* reduce the per-thread first hits to the global minimum index */
    minIndex = size;
    for (i = 0; i < threads; i++) {
        if (found[i] != -1 && found[i] < minIndex)
            minIndex = found[i];
    }
    free(found);
    return (minIndex == size) ? -1 : minIndex;
}
/* MPI linear search. Every rank holds the full array and scans its own
 * contiguous slice in place; rank 0 gathers the per-rank first-match
 * indices and returns the smallest (-1 if x is absent). Non-root ranks
 * return -2 so the caller can skip reporting.
 * NOTE(review): when size % np != 0 the trailing size % np elements are
 * never scanned — confirm callers only pass sizes divisible by the rank
 * count.
 * Fixes: removed the unused `localdata` variable and the dead commented-out
 * scatter variant; malloc results are now checked. */
int linearSearchMPI(int argc, char *argv[], int x, int *arr, int size) {
    int rank, np, localsize, i, minIndex;
    int *found, *results;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    localsize = size / np;
    found = (int *)malloc(1 * sizeof(int));
    results = (int *)malloc(np * sizeof(int));
    if (found == NULL || results == NULL) {
        fprintf(stderr, "Could not allocate result buffers\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    found[0] = -1;
    /* scan this rank's slice; record the first (global) index that matches */
    for (i = rank * localsize; i < (rank + 1) * localsize; i++) {
        if (arr[i] == x) {
            found[0] = i;
            break;
        }
    }
    MPI_Gather(found, 1, MPI_INT, results, 1, MPI_INT, 0, MPI_COMM_WORLD);
    if (rank == 0) {
        /* reduce per-rank hits to the global first match */
        minIndex = size;
        for (i = 0; i < np; i++) {
            if (results[i] != -1 && results[i] < minIndex)
                minIndex = results[i];
        }
        if (minIndex == size) minIndex = -1;
    } else {
        minIndex = -2;
    }
    free(found);
    free(results);
    MPI_Finalize();
    return minIndex;
}
/* Fill arr[0..size-1] from rand(); caller controls determinism via srand(). */
void randomize(int *arr, int size) {
    for (int k = 0; k < size; k++) {
        arr[k] = rand();
    }
}
/* Return the value at a random index in the upper half of arr
 * (index in [size/2, size)). Requires size >= 2 so size/2 is nonzero. */
int pickRandElem(int *arr, int size) {
    int half = size / 2;
    int idx = half + rand() % half;
    return arr[idx];
}
/* Print arr[0..size-1] space-separated on one line, then a newline. */
void printArray(int *arr, int size) {
    for (int k = 0; k < size; k++) {
        printf("%d ", arr[k]);
    }
    printf("\n");
}
/* Return 1 when arr[index] holds elem; otherwise report the mismatching
 * value on stdout and return 0. */
int is_correct(int elem, int index, int *arr) {
    if (arr[index] != elem) {
        printf("Returned this element: %d\n", arr[index]);
        return 0;
    }
    return 1;
}
/* Driver: argv[1] selects the search (0 sequential, 1 OpenMP, 2 MPI),
 * argv[2] is the array size. Results and timings are appended to
 * LinearSearch.txt.
 * Fixes: start/end/index were read UNINITIALIZED when oper was not in
 * {0,1,2} (undefined behavior); fopen/malloc results are now checked; the
 * early MPI non-root return no longer leaks the open file. */
int main(int argc, char *argv[]) {
    if (argc != 3) {
        //printf("Must specify size of array.\n");
        return 0;
    }
    fp = fopen("LinearSearch.txt", "a");
    if (fp == NULL) {
        fprintf(stderr, "Could not open LinearSearch.txt\n");
        return 1;
    }
    int size = atoi(argv[2]), oper = atoi(argv[1]), randElem, index = -1;
    int *arr = (int *)malloc(size * sizeof(int));
    if (arr == NULL) {
        fclose(fp);
        return 1;
    }
    double start = 0.0, end = 0.0;
    randomize(arr, size);
    randElem = pickRandElem(arr, size); /* guaranteed present in arr */
    if (oper == 2) {
        start = omp_get_wtime();
        index = linearSearchMPI(argc, argv, randElem, arr, size);
        if (index == -2) { /* non-root MPI rank: nothing to report */
            free(arr);
            fclose(fp);
            return 0;
        }
        end = omp_get_wtime();
        fprintf(fp, "MPI Linear Search\n");
    }
    else if (oper == 1) {
        start = omp_get_wtime();
        index = linearSearchOMP(randElem, arr, size);
        end = omp_get_wtime();
        fprintf(fp, "OpenMP Linear Search\n");
    }
    else if (oper == 0) {
        start = omp_get_wtime();
        index = linearSearch(randElem, arr, size);
        end = omp_get_wtime();
        fprintf(fp, "Sequential Linear Search\n");
    }
    else { /* previously fell through and logged garbage timings */
        fprintf(stderr, "Unknown operation: %d\n", oper);
        free(arr);
        fclose(fp);
        return 1;
    }
    fprintf(fp, "Element: %d -> Index: %d\n", randElem, index);
    fprintf(fp, "Time elapsed: %f\n", (end - start));
    if (is_correct(randElem, index, arr)) fprintf(fp, "Correct\n");
    else fprintf(fp, "Incorrect\n");
    free(arr);
    fclose(fp);
    return 0;
}
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
/* A tree node, together with a location, so that we can track locations
(and ranges) during parsing.
The location is redundant for node kinds that have locations,
but not all node kinds do (e.g. constants, and references to
params, locals, etc), so we stash a copy here. */
/* Pairs a tree node with a cached source location so ranges survive for
   node kinds that cannot store their own location. Implicitly converts
   to and from tree so it can flow through existing parser code. */
class cp_expr
{
public:
cp_expr () :
m_value (NULL), m_loc (UNKNOWN_LOCATION) {}
/* Adopt whatever location VALUE itself carries (may be UNKNOWN). */
cp_expr (tree value) :
m_value (value), m_loc (EXPR_LOCATION (m_value)) {}
cp_expr (tree value, location_t loc):
m_value (value), m_loc (loc) {}
cp_expr (const cp_expr &other) :
m_value (other.m_value), m_loc (other.m_loc) {}
/* Implicit conversions to tree. */
operator tree () const { return m_value; }
tree & operator* () { return m_value; }
tree operator* () const { return m_value; }
tree & operator-> () { return m_value; }
tree operator-> () const { return m_value; }
tree get_value () const { return m_value; }
location_t get_location () const { return m_loc; }
/* Endpoints of the tracked range, recovered from the location table. */
location_t get_start () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_start;
}
location_t get_finish () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_finish;
}
/* Update both the node's own location (where it can hold one) and the
   cached copy, keeping the two in sync. */
void set_location (location_t loc)
{
protected_set_expr_location (m_value, loc);
m_loc = loc;
}
void set_range (location_t start, location_t finish)
{
set_location (make_location (m_loc, start, finish));
}
/* Wrap m_value with a location wrapper node if needed to preserve m_loc;
   returns *this so calls can be chained. */
cp_expr& maybe_add_location_wrapper ()
{
m_value = maybe_wrap_with_location (m_value, m_loc);
return *this;
}
private:
tree m_value;      /* the wrapped tree node */
location_t m_loc;  /* cached location (redundant when m_value has one) */
};
/* Compare a cp_expr against a bare tree by identity of the wrapped node;
   the cached location plays no part in equality. */
inline bool
operator == (const cp_expr &lhs, tree rhs)
{
return lhs.get_value () == rhs;
}
/* Indices into the cp_global_trees cache declared below; each CPTI_* slot
   is accessed through a like-named accessor macro (e.g. wchar_decl_node).
   CPTI_MAX must remain last — it sizes the array. */
enum cp_tree_index
{
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_GLOBAL,
CPTI_GLOBAL_TYPE,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_AGGR_TAG,
CPTI_CONV_OP_MARKER,
/* Cached identifiers for (special member) functions.  */
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_CONV_OP_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_GLOBAL_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_ANON_IDENTIFIER,
CPTI_AUTO_IDENTIFIER,
CPTI_DECLTYPE_AUTO_IDENTIFIER,
CPTI_INIT_LIST_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
/* Exception specifications and EH runtime entry points.  */
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_NOEXCEPT_DEFERRED_SPEC,
CPTI_TERMINATE_FN,
CPTI_CALL_UNEXPECTED_FN,
CPTI_GET_EXCEPTION_PTR_FN,
CPTI_BEGIN_CATCH_FN,
CPTI_END_CATCH_FN,
CPTI_ALLOCATE_EXCEPTION_FN,
CPTI_FREE_EXCEPTION_FN,
CPTI_THROW_FN,
CPTI_RETHROW_FN,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
CPTI_ALIGN_TYPE,
CPTI_ANY_TARG,
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define global_namespace cp_global_trees[CPTI_GLOBAL]
#define global_type_node cp_global_trees[CPTI_GLOBAL_TYPE]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* std::align_val_t */
#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
/* We cache these tree nodes so as to call get_identifier less frequently.
For identifiers for functions, including special member functions such
as ctors and assignment operators, the nodes can be used (among other
things) to iterate over their overloads defined by/for a type. For
example:
tree ovlid = assign_op_identifier;
tree overloads = get_class_binding (type, ovlid);
for (ovl_iterator it (overloads); it; ++it) { ... }
iterates over the set of implicitly and explicitly defined overloads
of the assignment operator for type (including the copy and move
assignment operators, whether deleted or not). */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define ovl_op_identifier(ISASS, CODE) (OVL_OP_INFO(ISASS, CODE)->identifier)
#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier)
#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier)
/* The name used for conversion operators -- but note that actual
conversion functions use special identifiers outside the identifier
table. */
#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the ::, std & anon namespaces. */
#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER]
/* auto and declspec(auto) identifiers. */
#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
/* Exception specifiers used for throw(), noexcept(true),
noexcept(false) and deferred noexcept. We rely on these being
uncloned. */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]
/* Exception handling function declarations. */
#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN]
#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN]
#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN]
#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN]
#define throw_fn cp_global_trees[CPTI_THROW_FN]
#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A node which matches any template argument. */
#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE,
and OMP_TASKLOOP)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ)
WILDCARD_PACK_P (in WILDCARD_DECL)
BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
IF_STMT_CONSTEXPR_P (IF_STMT)
TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM)
DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL)
SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT)
REINTERPRET_CAST_P (in NOP_EXPR)
ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR)
1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR)
TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
OVL_USING_P (in OVERLOAD)
IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR)
2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
OVL_HIDDEN_P (in OVERLOAD)
SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT)
LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR)
OVL_NESTED_P (in OVERLOAD)
4: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
CALL_EXPR, or FIELD_DECL).
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
OVL_LOOKUP_P (in OVERLOAD)
LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL)
5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
LABEL_DECL_BREAK (in LABEL_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
DECL_ARRAY_PARAMETER_P (in PARM_DECL)
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
DECL_CONSTRAINT_VAR_P (in a PARM_DECL)
TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL)
LABEL_DECL_CDTOR (in LABEL_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS.
For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE.
For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE,
RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO.
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
/* Each *_CHECK macro below asserts (when tree checking is enabled) that
NODE has one of the listed tree codes, and evaluates to NODE.  */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
/* Assert that NODE is a FUNCTION_DECL that carries thunk information
(lang_specific->u.fn.thunk_p set); with checking disabled this is a
no-op that yields NODE unchanged.  */
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
/* The parts shared with the C front end.  */
struct c_common_identifier c_common;
/* The innermost binding for this identifier; outer bindings are
reachable through its PREVIOUS field (see IDENTIFIER_BINDING).  */
cxx_binding *bindings;
};
/* If T is a C++ front-end identifier node, return it as a pointer to
   its language-dependent representation; otherwise return NULL.  */
inline lang_identifier*
identifier_p (tree t)
{
  return TREE_CODE (t) == IDENTIFIER_NODE ? (lang_identifier*) t : NULL;
}
/* Cast identifier NODE (with checking) to its language-dependent
representation.  */
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
/* Tree structure behind a TEMPLATE_PARM_INDEX.  */
struct GTY(()) template_parm_index {
struct tree_common common;
/* Position of the parameter within its parameter list.  */
int index;
/* Nesting level of the parameter list it belongs to.  */
int level;
/* The original level -- NOTE(review): presumably the level before any
level reduction during substitution; confirm against users.  */
int orig_level;
/* The DECL for this parameter.  */
tree decl;
};
/* Tree structure behind a pointer-to-member constant.  */
struct GTY(()) ptrmem_cst {
struct tree_common common;
/* The member the constant designates.  */
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
/* In a TRY_BLOCK, marks it as a cleanup; see the TREE_LANG_FLAG_0
usage table above.  */
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
/* In a BIND_EXPR, marks a try block; see the flag table above.  */
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
/* Nonzero if function NODE needs an outer body block: constructors,
destructors and lambda call operators.  */
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
/* In a COND_EXPR, set when it implements a vector delete; see the flag
table above.  */
#define COND_EXPR_IS_VEC_DELETE(NODE) \
TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions
are not constexprs. Other NOP_EXPRs are. */
#define REINTERPRET_CAST_P(NODE) \
TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* Lookup walker marking. */
#define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE)
#define LOOKUP_FOUND_P(NODE) \
TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL))
/* These two accessors should only be used by OVL manipulators.
Other users should use iterators and convenience functions. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)
/* If set, this was imported in a using declaration. */
#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
/* If set, this overload is a hidden decl. */
#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
/* If set, this overload contains a nested overload. */
#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
/* If set, this overload was constructed during lookup. */
#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
/* If set, this is a persistent lookup. */
#define OVL_USED_P(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* The first decl of an overload. */
#define OVL_FIRST(NODE) ovl_first (NODE)
/* The name of the overload set. */
#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))
/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are
always wrapped in an OVERLOAD, so we don't need to check them
here. */
#define OVL_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
/* Whether this is a single member overload. */
#define OVL_SINGLE_P(NODE) \
(TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))
/* OVL_HIDDEN_P nodes come first, then OVL_USING_P nodes, then regular
fns. */
/* Tree structure behind an OVERLOAD node: one link in a chain of
functions.  The next link is stored in common.chain (see OVL_CHAIN);
FUNCTION may itself be a nested OVERLOAD (see OVL_NESTED_P).  */
struct GTY(()) tree_overload {
struct tree_common common;
/* The function (or nested overload) at this position.  */
tree function;
};
/* Iterator over a one-dimensional overload set.  When explicitly
   enabled, it may also walk the outer level of a two-dimensional
   overload (as produced by lookup).  */
class ovl_iterator
{
  tree ovl;                /* Current position in the chain.  */
  const bool allow_inner;  /* Only used when checking.  */

 public:
  explicit ovl_iterator (tree o, bool allow = false)
    : ovl (o), allow_inner (allow)
  {
  }

 private:
  /* Do not duplicate.  */
  ovl_iterator &operator= (const ovl_iterator &);
  ovl_iterator (const ovl_iterator &);

 public:
  /* True while there is a function left to visit.  */
  operator bool () const
  {
    return ovl != NULL_TREE;
  }

  /* Advance to the next function in the set.  */
  ovl_iterator &operator++ ()
  {
    if (TREE_CODE (ovl) == OVERLOAD)
      ovl = OVL_CHAIN (ovl);
    else
      ovl = NULL_TREE;
    return *this;
  }

  /* The function at the current position.  */
  tree operator* () const
  {
    tree fn = ovl;
    if (TREE_CODE (fn) == OVERLOAD)
      fn = OVL_FUNCTION (fn);

    /* Check this is not an unexpected 2-dimensional overload.  */
    gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);

    return fn;
  }

 public:
  /* Whether this overload was introduced by a using decl.  */
  bool using_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl);
  }

  /* Whether the current node is a hidden decl.  */
  bool hidden_p () const
  {
    return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
  }

 public:
  /* Remove the current node from the overload set HEAD, returning the
     resulting set.  */
  tree remove_node (tree head)
  {
    return remove_node (head, ovl);
  }

  /* Unhide the current node within HEAD, returning the resulting set.  */
  tree reveal_node (tree head)
  {
    return reveal_node (head, ovl);
  }

 protected:
  /* If we have a nested overload, point at the inner overload and
     return the next link on the outer one.  */
  tree maybe_push ()
  {
    if (!ovl || TREE_CODE (ovl) != OVERLOAD || !OVL_NESTED_P (ovl))
      return NULL_TREE;

    tree outer = OVL_CHAIN (ovl);
    ovl = OVL_FUNCTION (ovl);
    return outer;
  }

  /* Restore an outer nested overload once the inner one is exhausted.  */
  void pop (tree outer)
  {
    gcc_checking_assert (!ovl);
    ovl = outer;
  }

 private:
  /* We make these static functions to avoid the address of the
     iterator escaping the local context.  */
  static tree remove_node (tree head, tree node);
  static tree reveal_node (tree ovl, tree node);
};
/* Iterator over a (potentially) two-dimensional overload set, as
   produced by name lookup.  */
class lkp_iterator : public ovl_iterator
{
  typedef ovl_iterator parent;

  tree outer;  /* Remainder of the outer level, when inside a nested set.  */

 public:
  explicit lkp_iterator (tree o)
    : parent (o, true), outer (maybe_push ())
  {
  }

 public:
  lkp_iterator &operator++ ()
  {
    bool exhausted = !parent::operator++ ();

    if (outer)
      {
	if (exhausted)
	  {
	    /* The inner level is done: resume the outer chain and try
	       to descend into the next nested set.  */
	    pop (outer);
	    outer = maybe_push ();
	  }
      }
    else
      /* Not currently inside a nested set; the new position may open
	 one.  */
      outer = maybe_push ();

    return *this;
  }
};
/* hash traits for declarations. Hashes potential overload sets via
DECL_NAME. */
struct named_decl_hash : ggc_remove <tree>
{
typedef tree value_type; /* A DECL or OVERLOAD */
typedef tree compare_type; /* An identifier. */
inline static hashval_t hash (const value_type decl);
inline static bool equal (const value_type existing, compare_type candidate);
/* Empty slots are represented by NULL_TREE.  */
static inline void mark_empty (value_type &p) {p = NULL_TREE;}
static inline bool is_empty (value_type p) {return !p;}
/* Nothing is deletable. Everything is insertable. */
static bool is_deleted (value_type) { return false; }
static void mark_deleted (value_type) { gcc_unreachable (); }
};
/* Tree structure behind a TEMPLATE_DECL.  */
struct GTY(()) tree_template_decl {
struct tree_common common;
/* NOTE(review): presumably the template parameter list (accessed via
DECL_TEMPLATE_PARMS) -- confirm against the accessor macros.  */
tree arguments;
/* NOTE(review): presumably the templated entity itself (accessed via
DECL_TEMPLATE_RESULT) -- confirm against the accessor macros.  */
tree result;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* If T is a BASELINK, grab the functions, otherwise just T, which is
expected to already be a (list of) functions. */
#define MAYBE_BASELINK_FUNCTIONS(T) \
(BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
/* Tree structure behind a BASELINK node.  */
struct GTY(()) tree_baselink {
struct tree_common common;
/* See BASELINK_BINFO.  */
tree binfo;
/* See BASELINK_FUNCTIONS.  */
tree functions;
/* See BASELINK_ACCESS_BINFO.  */
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
};
/* The various kinds of C++0x warnings we encounter.  Each value names
the C++11 feature whose use is being diagnosed.  */
enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
};
/* The various kinds of operation used by composite_pointer_type. */
enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
};
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
};
/* Possible cases of implicit bad rhs conversions. */
enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
};
/* Possible cases of implicit or explicit bad conversions to void. */
enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
};
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
enum GTY(()) abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
};
/* Macros for access to language-specific slots in an identifier. */
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. Its PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE only indicates on local and class scope the current
type. For namespace scope, the presence of a type in any namespace
is indicated with global_type_node, and the real type behind must
be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
/* Kinds of identifiers. Values are carefully chosen. */
enum cp_identifier_kind {
cik_normal = 0, /* Not a special identifier. */
cik_keyword = 1, /* A keyword. */
cik_ctor = 2, /* Constructor (in-chg, complete or base). */
cik_dtor = 3, /* Destructor (in-chg, deleting, complete or
base). */
cik_simple_op = 4, /* Non-assignment operator name. */
cik_assign_op = 5, /* An assignment operator name. */
cik_conv_op = 6, /* Conversion operator name. */
cik_reserved_for_udlit = 7, /* Not yet in use */
cik_max
};
/* Kind bits. */
/* The cp_identifier_kind of an identifier is stored in these three
TREE_LANG_FLAG bits (bit 0 is the low-order bit of the cik_* value);
the predicates further below decode them with bitwise operations.  */
#define IDENTIFIER_KIND_BIT_0(NODE) \
TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_1(NODE) \
TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_2(NODE) \
TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) \
TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) \
TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (NAME)))
/* True if this identifier is a reserved word. C_RID_CODE (node) is
then the RID_* value of the keyword. Value 1. */
#define IDENTIFIER_KEYWORD_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a constructor or
destructor. Value 2 or 3. */
#define IDENTIFIER_CDTOR_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& IDENTIFIER_KIND_BIT_1 (NODE))
/* True if this identifier is the name of a constructor. Value 2. */
#define IDENTIFIER_CTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is the name of a destructor. Value 3. */
#define IDENTIFIER_DTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is for any operator name (including
conversions). Value 4, 5, 6 or 7. */
#define IDENTIFIER_ANY_OP_P(NODE) \
(IDENTIFIER_KIND_BIT_2 (NODE))
/* True if this identifier is for an overloaded operator. Values 4, 5. */
#define IDENTIFIER_OVL_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)))
/* True if this identifier is for any assignment. Values 5. */
#define IDENTIFIER_ASSIGN_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a type-conversion
operator. Value 7. */
#define IDENTIFIER_CONV_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_1 (NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is a new or delete operator. */
#define IDENTIFIER_NEWDEL_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)
/* True if this identifier is a new operator. */
#define IDENTIFIER_NEW_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& (IDENTIFIER_OVL_OP_FLAGS (NODE) \
& (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC)
/* Access a C++-specific index for identifier NODE.
Used to optimize operator mappings etc. */
#define IDENTIFIER_CP_INDEX(NODE) \
(IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
/* The instantiations recorded for this default argument.  */
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
/* Tree structure behind a DEFAULT_ARG node: a deferred-parse default
argument, kept as raw tokens until it is actually needed.  */
struct GTY (()) tree_default_arg {
struct tree_common common;
/* See DEFARG_TOKENS.  */
struct cp_token_cache *tokens;
/* See DEFARG_INSTANTIATIONS.  */
vec<tree, va_gc> *instantiations;
};
/* The expression a deferred noexcept-specifier will be instantiated
from; NULL_TREE while it is still unevaluated (see
UNEVALUATED_NOEXCEPT_SPEC_P below).  */
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
/* The template arguments to substitute into the pattern.  */
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
/* True if NODE is an exception specification whose noexcept operand is
a DEFERRED_NOEXCEPT.  */
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
/* True if NODE is a deferred noexcept specification that has not yet
been given a pattern.  */
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
(DEFERRED_NOEXCEPT_SPEC_P (NODE) \
&& DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
/* Tree structure behind a DEFERRED_NOEXCEPT node.  */
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
/* See DEFERRED_NOEXCEPT_PATTERN.  */
tree pattern;
/* See DEFERRED_NOEXCEPT_ARGS.  */
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
/* Tree structure behind a STATIC_ASSERT node.  */
struct GTY (()) tree_static_assert {
struct tree_common common;
/* See STATIC_ASSERT_CONDITION.  */
tree condition;
/* See STATIC_ASSERT_MESSAGE.  */
tree message;
/* See STATIC_ASSERT_SOURCE_LOCATION.  */
location_t location;
};
/* Tree structure behind an ARGUMENT_PACK_SELECT node.  */
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
/* The argument pack being selected from.  */
tree argument_pack;
/* Index of the selected pack element.  */
int index;
};
/* The different kinds of traits that we encounter.  One enumerator per
compiler trait built-in (e.g. CPTK_IS_CLASS for __is_class).  */
enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_AGGREGATE,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_SAME_AS,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_TRIVIALLY_ASSIGNABLE,
CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
CPTK_IS_TRIVIALLY_COPYABLE,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE,
CPTK_IS_ASSIGNABLE,
CPTK_IS_CONSTRUCTIBLE
};
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
/* Tree structure behind a TRAIT_EXPR node.  */
struct GTY (()) tree_trait_expr {
struct tree_common common;
/* See TRAIT_EXPR_TYPE1.  */
tree type1;
/* See TRAIT_EXPR_TYPE2; may be unused for unary traits.  */
tree type2;
/* See TRAIT_EXPR_KIND.  */
enum cp_trait_kind kind;
};
/* Based off of TYPE_UNNAMED_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_DECLARES_FUNCTION_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
/* The default capture mode of a lambda-introducer: none, by copy, or
by reference.  */
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* True iff uses of a const variable capture were optimized away. */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit
capture. */
#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda, which is also the type of the
LAMBDA_EXPR. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(TREE_TYPE (LAMBDA_EXPR_CHECK (NODE)))
/* Tree structure behind a LAMBDA_EXPR node; see the accessor macros
above for the meaning of each field.  */
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
/* See LAMBDA_EXPR_CAPTURE_LIST.  */
tree capture_list;
/* See LAMBDA_EXPR_THIS_CAPTURE.  */
tree this_capture;
/* See LAMBDA_EXPR_EXTRA_SCOPE.  */
tree extra_scope;
/* See LAMBDA_EXPR_PENDING_PROXIES.  */
vec<tree, va_gc> *pending_proxies;
/* See LAMBDA_EXPR_LOCATION.  */
location_t locus;
/* See LAMBDA_EXPR_DEFAULT_CAPTURE_MODE.  */
enum cp_lambda_default_capture_mode_type default_capture_mode;
/* See LAMBDA_EXPR_DISCRIMINATOR.  */
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
/* See the explanatory comment above: one (typedef, context, location)
   triplet, recording a use of a typedef through a context.  */
struct GTY(()) qualified_typedef_usage_s {
  /* The typedef declaration that was used.  */
  tree typedef_decl;
  /* The scope through which it was accessed ('foo' in the example).  */
  tree context;
  /* The source location of the use ('#1' in the example).  */
  location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Non-zero if this variable template specialization was specified using a
template-id, so it's a partial or full specialization and not a definition
of the member template of a particular class specialization. */
#define TINFO_USED_TEMPLATE_ID(NODE) \
(TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
/* The TEMPLATE_INFO node payload, checked via TEMPLATE_INFO_CHECK and
   carrying the TINFO_* flags above.  */
struct GTY(()) tree_template_info {
  struct tree_common common;
  /* Typedef usages (see qualified_typedef_usage_t above) whose access
     still needs to be checked -- presumably at instantiation time;
     confirm against the fill sites.  */
  vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
// and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
// The CONSTRAINT_INFO node described by the comment above.  Accessed
// through the CI_* macros below.
struct GTY(()) tree_constraint_info {
  struct tree_base base;
  // Constraint expression introduced by the template header; may be
  // null (see CI_TEMPLATE_REQS below).
  tree template_reqs;
  // Constraint expression introduced by a function declarator's
  // trailing requires-clause; may be null (see CI_DECLARATOR_REQS).
  tree declarator_reqs;
  // The associated constraints: the conjunction of the two above,
  // used for declaration matching (see CI_ASSOCIATED_CONSTRAINTS).
  tree associated_constr;
};
// Require that the pointer PTR is non-null before handing it back, so
// the result can be dereferenced or chained safely.
template<typename T>
inline T*
check_nonnull (T* ptr)
{
  gcc_assert (ptr);
  return ptr;
}
// Returns T viewed as constraint info when it is a non-null
// CONSTRAINT_INFO node; otherwise returns NULL.
inline tree_constraint_info *
check_constraint_info (tree t)
{
  if (!t || TREE_CODE (t) != CONSTRAINT_INFO)
    return NULL;
  return (tree_constraint_info *) t;
}
// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// a requirements clause after the template parameter list, or constraints
// through a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->template_reqs
// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->declarator_reqs
// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
check_constraint_info (check_nonnull(NODE))->associated_constr
// Access the logical constraints on the template parameters introduced
// at a given template parameter list level indicated by NODE.
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
// Access the logical constraints on the template parameter declaration
// indicated by NODE.
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
/* Non-zero if the noexcept is present in a compound requirement. */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))
/* The constraints on an 'auto' placeholder type, used in an argument deduction
constraint. */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
DECL_SIZE_UNIT (TYPE_NAME (NODE))
/* The expression evaluated by the predicate constraint. */
#define PRED_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0)
/* The concept of a concept check. */
#define CHECK_CONSTR_CONCEPT(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)
/* The template arguments of a concept check. */
#define CHECK_CONSTR_ARGS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)
/* The expression validated by the expression constraint. */
#define EXPR_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0)
/* The type validated by the type constraint. */
#define TYPE_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0)
/* In an implicit conversion constraint, the source expression. */
#define ICONV_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0)
/* In an implicit conversion constraint, the target type. */
#define ICONV_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1)
/* In an argument deduction constraint, the source expression. */
#define DEDUCT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0)
/* In an argument deduction constraint, the target type pattern. */
#define DEDUCT_CONSTR_PATTERN(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1)
/* In an argument deduction constraint, the list of placeholder nodes. */
#define DEDUCT_CONSTR_PLACEHOLDER(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2)
/* The expression of an exception constraint. */
#define EXCEPT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0)
/* In a parameterized constraint, the local parameters. */
#define PARM_CONSTR_PARMS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0)
/* In a parameterized constraint, the operand. */
#define PARM_CONSTR_OPERAND(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1)
/* Whether a PARM_DECL represents a local parameter in a
requires-expression. */
#define CONSTRAINT_VAR_P(NODE) \
DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
/* The concept constraining this constrained template-parameter. */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
/* Any extra template arguments specified for a constrained
template-parameter. */
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
DECL_SIZE (TYPE_DECL_CHECK (NODE))
/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
prototype for the constrained parameter in finish_shorthand_constraint,
attached for convenience. */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
DECL_INITIAL (TYPE_DECL_CHECK (NODE))
/* Discriminators for the C++-specific tree node structures; each tag
   selects one member of the lang_tree_node union below (dispatched by
   cp_tree_node_structure, per the union's GTY "desc" string).  */
enum cp_tree_node_structure_enum {
  TS_CP_GENERIC,
  TS_CP_IDENTIFIER,
  TS_CP_TPI,
  TS_CP_PTRMEM,
  TS_CP_OVERLOAD,
  TS_CP_BASELINK,
  TS_CP_TEMPLATE_DECL,
  TS_CP_DEFAULT_ARG,
  TS_CP_DEFERRED_NOEXCEPT,
  TS_CP_STATIC_ASSERT,
  TS_CP_ARGUMENT_PACK_SELECT,
  TS_CP_TRAIT_EXPR,
  TS_CP_LAMBDA_EXPR,
  TS_CP_TEMPLATE_INFO,
  TS_CP_CONSTRAINT_INFO,
  TS_CP_USERDEF_LITERAL
};
/* The resulting tree type: a discriminated union of every C++-specific
   tree node structure above.  The GTY "desc" string selects the active
   member via cp_tree_node_structure.  */
union GTY((desc ("cp_tree_node_structure (&%h)"),
	   chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
  union tree_node GTY ((tag ("TS_CP_GENERIC"),
			desc ("tree_node_structure (&%h)"))) generic;
  struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
  struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
  struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
  struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
  struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
  struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
  struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
  struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
  struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
    static_assertion;
  struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
    argument_pack_select;
  struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
    trait_expression;
  struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
    lambda_expression;
  struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
    template_info;
  struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
    constraint_info;
  struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
    userdef_literal;
};
/* Global state. */
/* Global state.  The current instance is reached through scope_chain;
   the accessor macros below document the meaning of most fields.  */
struct GTY(()) saved_scope {
  vec<cxx_saved_binding, va_gc> *old_bindings;
  /* The current open namespace (current_namespace).  */
  tree old_namespace;
  /* The stack for namespaces of current declarations
     (decl_namespace_list).  */
  vec<tree, va_gc> *decl_ns_list;
  /* IDENTIFIER_NODE: name of current class (current_class_name).  */
  tree class_name;
  /* _TYPE: the type of the current class (current_class_type).  */
  tree class_type;
  /* The access specifier most recently given while parsing a class
     definition (current_access_specifier).  */
  tree access_specifier;
  tree function_decl;
  /* The language name stack (current_lang_base / current_lang_name).  */
  vec<tree, va_gc> *lang_base;
  tree lang_name;
  /* Active template parameters, a TREE_LIST with one node per level
     (current_template_parms).  */
  tree template_parms;
  /* Cached class binding level from the most recently exited class
     (previous_class_level).  */
  cp_binding_level *x_previous_class_level;
  tree x_saved_tree;
  /* Only used for uses of this in trailing return type.  */
  tree x_current_class_ptr;
  tree x_current_class_ref;
  int x_processing_template_decl;
  int x_processing_specialization;
  BOOL_BITFIELD x_processing_explicit_instantiation : 1;
  BOOL_BITFIELD need_pop_function_context : 1;
  /* Nonzero if we are parsing the discarded statement of a constexpr
     if-statement.  */
  BOOL_BITFIELD discarded_stmt : 1;
  int unevaluated_operand;
  int inhibit_evaluation_warnings;
  /* Nonzero if we are parsing the operand of a noexcept operator
     (cp_noexcept_operand).  */
  int noexcept_operand;
  /* If non-zero, implicit "omp declare target" attribute is added into the
     attribute lists.  */
  int omp_declare_target_attribute;
  struct stmt_tree_s x_stmt_tree;
  cp_binding_level *class_bindings;
  cp_binding_level *bindings;
  /* Map from local declarations in the template being instantiated to
     the corresponding instantiated locals (local_specializations).  */
  hash_map<tree, tree> *GTY((skip)) x_local_specializations;
  /* The next-outer saved scope.  */
  struct saved_scope *prev;
};
extern GTY(()) struct saved_scope *scope_chain;
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
#define in_discarded_stmt scope_chain->discarded_stmt
/* RAII sentinel to handle clearing processing_template_decl and restoring
   it when done.  */
struct processing_template_decl_sentinel
{
  /* The value of processing_template_decl at construction time; the
     destructor puts it back.  */
  int saved;
  /* Saves the current value; when RESET (the default), also clears
     processing_template_decl for the duration of the sentinel.  */
  processing_template_decl_sentinel (bool reset = true)
    : saved (processing_template_decl)
  {
    if (reset)
      processing_template_decl = 0;
  }
  ~processing_template_decl_sentinel()
  {
    processing_template_decl = saved;
  }
};
/* RAII sentinel to disable certain warnings during template substitution
   and elsewhere.  */
struct warning_sentinel
{
  /* The warning flag being controlled.  */
  int &flag;
  /* Its original value, restored by the destructor.  */
  int val;
  /* Saves FLAG's value; when SUPPRESS (the default), zeroes the flag
     to silence the corresponding warning for the sentinel's lifetime.  */
  warning_sentinel(int& flag, bool suppress=true)
    : flag(flag), val(flag) { if (suppress) flag = 0; }
  ~warning_sentinel() { flag = val; }
};
/* RAII sentinel that saves the value of a variable, optionally
   overrides it right away, and restores its value when the sentinel
   is destructed.  */
template <typename T>
class temp_override
{
  /* The variable being controlled.  */
  T& overridden_variable;
  /* Its value at construction time, restored by the destructor.  */
  T saved_value;
public:
  /* Save only; the variable keeps its current value until the caller
     assigns to it.  */
  temp_override(T& var) : overridden_variable (var), saved_value (var) {}
  /* Save and immediately replace the variable's value with OVERRIDER.  */
  temp_override(T& var, T overrider)
    : overridden_variable (var), saved_value (var)
  {
    overridden_variable = overrider;
  }
  ~temp_override() { overridden_variable = saved_value; }
};
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
presently being instantiated to the corresponding instantiated
local variables. */
#define local_specializations scope_chain->x_local_specializations
/* Nonzero if we are parsing the operand of a noexcept operator. */
#define cp_noexcept_operand scope_chain->noexcept_operand
/* A list of private types mentioned, for deferred access checking.  */
/* NOTE(review): the comment above appears to describe a different
   entity; the struct below is a (uid -> tree) hash-map entry, used
   e.g. by language_function::extern_decl_map further down.  */
struct GTY((for_user)) cxx_int_tree_map {
  /* The key -- presumably a DECL_UID; confirm at the fill sites.  */
  unsigned int uid;
  /* The tree that UID maps to.  */
  tree to;
};
/* Hash traits for GC-managed hash tables of cxx_int_tree_map.  */
struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
  static hashval_t hash (cxx_int_tree_map *);
  static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};
struct named_label_entry; /* Defined in decl.c. */
/* Hash traits for tables of named labels, compared against the
   label's identifier.  */
struct named_label_hash : ggc_remove <named_label_entry *>
{
  typedef named_label_entry *value_type;
  typedef tree compare_type; /* An identifier. */
  inline static hashval_t hash (value_type);
  inline static bool equal (const value_type, compare_type);
  /* An empty slot is a null entry pointer.  */
  inline static void mark_empty (value_type &p) {p = NULL;}
  inline static bool is_empty (value_type p) {return !p;}
  /* Nothing is deletable. Everything is insertable. */
  inline static bool is_deleted (value_type) { return false; }
  inline static void mark_deleted (value_type) { gcc_unreachable (); }
};
/* Global state pertinent to the current function. */
/* Global state pertinent to the current function.  Reached through
   cp_function_chain; the accessor macros below document most fields.  */
struct GTY(()) language_function {
  struct c_language_function base;
  /* See cdtor_label below.  */
  tree x_cdtor_label;
  /* PARM_DECL for `this' and an expression for `*this' in a member
     function (current_class_ptr / current_class_ref).  */
  tree x_current_class_ptr;
  tree x_current_class_ref;
  /* EH_SPEC_BLOCK for the function's exception-specifiers
     (current_eh_spec_block).  */
  tree x_eh_spec_block;
  /* `__in_chrg' parameter; ctors/dtors only (current_in_charge_parm).  */
  tree x_in_charge_parm;
  /* `__vtt_parm' parameter; ctors/dtors only (current_vtt_parm).  */
  tree x_vtt_parm;
  /* Expression always returned, for the named return value
     optimization (current_function_return_value).  */
  tree x_return_value;
  /* Type involving 'auto' used for return type deduction
     (current_function_auto_return_pattern).  */
  tree x_auto_return_pattern;
  /* See the current_function_returns_* macros below.  */
  BOOL_BITFIELD returns_value : 1;
  BOOL_BITFIELD returns_null : 1;
  BOOL_BITFIELD returns_abnormally : 1;
  BOOL_BITFIELD infinite_loop: 1;
  BOOL_BITFIELD x_in_function_try_handler : 1;
  BOOL_BITFIELD x_in_base_initializer : 1;
  /* True if this function can throw an exception.  */
  BOOL_BITFIELD can_throw : 1;
  BOOL_BITFIELD invalid_constexpr : 1;
  hash_table<named_label_hash> *x_named_labels;
  cp_binding_level *bindings;
  vec<tree, va_gc> *x_local_names;
  /* Tracking possibly infinite loops.  This is a vec<tree> only because
     vec<bool> doesn't work with gtype.  */
  vec<tree, va_gc> *infinite_loops;
  /* Hash table of cxx_int_tree_map entries (see above) for extern
     declarations.  */
  hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor or destructor, the point at which all derived-class
   construction/destruction has been done.  I.e., just before a
   constructor returns, or before any base class destruction will be done
   in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
obvious infinite loop. This can have false positives and false
negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* In parser.c. */
extern tree cp_literal_operator_id (const char *);
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
enum languages { lang_c, lang_cplusplus };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_UNNAMED_P(NODE) \
(OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not a union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6(NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
/* A (purpose, value) pair of trees; used e.g. for the vcall_indices
   vector in lang_type below.  */
struct GTY (()) tree_pair_s {
  tree purpose;
  tree value;
};
typedef tree_pair_s *tree_pair_p;
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameters types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
/* Per-class-type language-specific data; see the size-concern comment
   above and the CLASSTYPE_* / TYPE_* accessor macros below, which
   document the individual flags.  */
struct GTY(()) lang_type {
  unsigned char align;
  unsigned has_type_conversion : 1;
  unsigned has_copy_ctor : 1;
  unsigned has_default_ctor : 1;
  unsigned const_needs_init : 1;
  unsigned ref_needs_init : 1;
  unsigned has_const_copy_assign : 1;
  unsigned use_template : 2;
  unsigned has_mutable : 1;
  unsigned com_interface : 1;
  unsigned non_pod_class : 1;
  unsigned nearly_empty_p : 1;
  unsigned user_align : 1;
  unsigned has_copy_assign : 1;
  unsigned has_new : 1;
  unsigned has_array_new : 1;
  /* Two bits; bit 0 set when scalar operator delete is defined (see
     TYPE_GETS_DELETE / TYPE_GETS_REG_DELETE below).  */
  unsigned gets_delete : 2;
  unsigned interface_only : 1;
  unsigned interface_unknown : 1;
  unsigned contains_empty_class_p : 1;
  unsigned anon_aggr : 1;
  unsigned non_zero_init : 1;
  unsigned empty_p : 1;
  /* 32 bits allocated.  */
  unsigned vec_new_uses_cookie : 1;
  unsigned declared_class : 1;
  unsigned diamond_shaped : 1;
  unsigned repeated_base : 1;
  unsigned being_defined : 1;
  unsigned debug_requested : 1;
  unsigned fields_readonly : 1;
  unsigned ptrmemfunc_flag : 1;
  unsigned was_anonymous : 1;
  unsigned lazy_default_ctor : 1;
  unsigned lazy_copy_ctor : 1;
  unsigned lazy_copy_assign : 1;
  unsigned lazy_destructor : 1;
  unsigned has_const_copy_ctor : 1;
  unsigned has_complex_copy_ctor : 1;
  unsigned has_complex_copy_assign : 1;
  unsigned non_aggregate : 1;
  unsigned has_complex_dflt : 1;
  unsigned has_list_ctor : 1;
  unsigned non_std_layout : 1;
  unsigned is_literal : 1;
  unsigned lazy_move_ctor : 1;
  unsigned lazy_move_assign : 1;
  unsigned has_complex_move_ctor : 1;
  unsigned has_complex_move_assign : 1;
  unsigned has_constexpr_ctor : 1;
  unsigned unique_obj_representations : 1;
  unsigned unique_obj_representations_set : 1;
  /* When adding a flag here, consider whether or not it ought to
     apply to a template instance if it applies to the template.  If
     so, make sure to copy it in instantiate_class_template!  */
  /* There are some bits left to fill out a 32-bit word.  Keep track
     of this by updating the size of this bitfield whenever you add or
     remove a flag.  */
  unsigned dummy : 4;
  /* Binfo of the primary base class, if any (CLASSTYPE_PRIMARY_BINFO).  */
  tree primary_base;
  /* Vcall indices, as (purpose, value) tree pairs.  */
  vec<tree_pair_s, va_gc> *vcall_indices;
  tree vtables;
  tree typeinfo_var;
  /* Virtual base binfos in initialization order
     (CLASSTYPE_VBASECLASSES).  */
  vec<tree, va_gc> *vbases;
  /* Nested user-defined types (CLASSTYPE_NESTED_UTDS).  */
  binding_table nested_udts;
  /* The type used when this type is a base class (CLASSTYPE_AS_BASE).  */
  tree as_base;
  /* Virtual functions derived classes must override
     (CLASSTYPE_PURE_VIRTUALS).  */
  vec<tree, va_gc> *pure_virtuals;
  tree friend_classes;
  /* The member vector (CLASSTYPE_MEMBER_VEC).  */
  vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
  /* The vtable key method (CLASSTYPE_KEY_METHOD).  */
  tree key_method;
  /* TREE_LIST of members in declaration order (CLASSTYPE_DECL_LIST).  */
  tree decl_list;
  tree befriending_classes;
  /* In a RECORD_TYPE, information specific to Objective-C++, such
     as a list of adopted protocols or a pointer to a corresponding
     @interface.  See objc/objc-act.h for details.  */
  tree objc_info;
  /* FIXME reuse another field? */
  tree lambda_expr;
};
/* We used to have a variant type for lang_type. Keep the name of the
checking accessor for the sole survivor. */
#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" whose parameter
has a parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK(NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector of members. During definition, it is unordered and only
member functions are present. After completion it is sorted and
contains both member functions and non-functions. STAT_HACK is
involved to preserve the one-slot-per-name invariant. */
#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
(get_class_binding_direct (NODE, ctor_identifier))
/* A FUNCTION_DECL for the destructor for NODE. This is the
destructors that take an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTOR(NODE) \
(get_class_binding_direct (NODE, dtor_identifier))
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes or tail padding. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS(NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class type does have unique object
representations. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)
/* Nonzero means that this class type has
CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
   is a nearly-empty virtual base that has been taken by some other
   base in the complete hierarchy).  */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5(NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Discriminator values for lang_decl.  Stored in lang_decl_base::selector;
   each value selects the matching GTY-tagged member of lang_decl's union
   (see struct lang_decl below).  */
enum lang_decl_selector
{
  lds_min,     /* lang_decl_min — generic decls with template info/access.  */
  lds_fn,      /* lang_decl_fn — functions.  */
  lds_ns,      /* lang_decl_ns — namespaces.  */
  lds_parm,    /* lang_decl_parm — parameters.  */
  lds_decomp   /* lang_decl_decomp — structured bindings.  */
};
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
   Some of the flags live here only to make lang_decl_min/fn smaller.  Do
   not make this struct larger than 32 bits; instead, make sel smaller.  */
struct GTY(()) lang_decl_base {
  /* Which member of lang_decl's union is active; see enum
     lang_decl_selector.  Larger than necessary for faster access.  */
  ENUM_BITFIELD(lang_decl_selector) selector : 16;
  /* Language linkage of the entity; read through DECL_LANGUAGE.  */
  ENUM_BITFIELD(languages) language : 1;
  unsigned use_template : 2;
  unsigned not_really_extern : 1; /* var or fn */
  unsigned initialized_in_class : 1; /* var or fn */
  unsigned repo_available_p : 1; /* var or fn; see DECL_REPO_AVAILABLE_P.  */
  unsigned threadprivate_or_deleted_p : 1; /* var or fn */
  unsigned anticipated_p : 1; /* fn, type or template */
  /* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
  unsigned friend_or_tls : 1; /* var, fn, type or template;
				 see DECL_FRIEND_P / DECL_GNU_TLS_P.  */
  /* var: declared in class as an array of unknown bound
     (VAR_HAD_UNKNOWN_BOUND).  */
  unsigned unknown_bound_p : 1; /* var */
  unsigned odr_used : 1; /* var or fn; see DECL_ODR_USED.  */
  /* Discriminant for lang_decl_min's u2 union: 0 -> access,
     1 -> discriminator.  */
  unsigned u2sel : 1;
  unsigned concept_p : 1; /* applies to vars and functions */
  unsigned var_declared_inline_p : 1; /* var */
  unsigned dependent_init_p : 1; /* var */
  /* 1 spare bit */
};
/* True for DECL codes which have template info and access. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes (those accepted by
   LANG_DECL_HAS_MIN).  */
struct GTY(()) lang_decl_min {
  struct lang_decl_base base;
  /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
     THUNK_ALIAS.
     In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
     VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
     DECL_TEMPLATE_INFO.  */
  tree template_info;
  /* The active member is chosen by base.u2sel (see the GTY desc
     below).  */
  union lang_decl_u2 {
    /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
       THUNK_VIRTUAL_OFFSET.
       In a VAR_DECL for which DECL_HAS_VALUE_EXPR_P holds,
       this is DECL_CAPTURED_VARIABLE.
       Otherwise this is DECL_ACCESS.  */
    tree GTY ((tag ("0"))) access;
    /* For TREE_STATIC VAR_DECL in function, this is DECL_DISCRIMINATOR.  */
    int GTY ((tag ("1"))) discriminator;
  } GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions.  */
struct GTY(()) lang_decl_fn {
  struct lang_decl_min min;
  /* In an overloaded operator, this is the compressed operator code
     (DECL_OVERLOADED_OPERATOR_CODE_RAW); not a TREE_CODE.  */
  unsigned ovl_op_code : 6;
  unsigned global_ctor_p : 1;
  unsigned global_dtor_p : 1;
  /* Static member function (DECL_STATIC_FUNCTION_P).  */
  unsigned static_function : 1;
  /* Pure virtual member function (DECL_PURE_VIRTUAL_P).  */
  unsigned pure_virtual : 1;
  unsigned defaulted_p : 1;
  /* Ctor/dtor with the extra in-charge parameter
     (DECL_HAS_IN_CHARGE_PARM_P).  */
  unsigned has_in_charge_parm_p : 1;
  /* The VTT parm has been added (DECL_HAS_VTT_PARM_P).  */
  unsigned has_vtt_parm_p : 1;
  /* Discriminant for the u union at the end of this struct.  */
  unsigned pending_inline_p : 1;
  /* Non-converting ctor or conversion fn (DECL_NONCONVERTING_P).  */
  unsigned nonconverting : 1;
  /* This decl is a thunk (DECL_THUNK_P); discriminant for u5 below.  */
  unsigned thunk_p : 1;
  /* For a thunk: adjusts the this pointer (DECL_THIS_THUNK_P) rather
     than the result pointer.  */
  unsigned this_thunk_p : 1;
  unsigned hidden_friend_p : 1;
  unsigned omp_declare_reduction_p : 1;
  unsigned spare : 13;
  /* 32-bits padding on 64-bit host.  */
  /* For a non-thunk function decl, this is a tree list of
     friendly classes (DECL_BEFRIENDING_CLASSES).  For a thunk
     function decl, it is the thunked to function decl.  */
  tree befriending_classes;
  /* For a non-virtual FUNCTION_DECL, this is
     DECL_FRIEND_CONTEXT.  For a virtual FUNCTION_DECL for which
     DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS.  Both
     this pointer and result pointer adjusting thunks are
     chained here.  This pointer thunks to return pointer thunks
     will be chained on the return pointer thunk.  */
  tree context;
  union lang_decl_u5
  {
    /* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
       DECL_CLONED_FUNCTION.  */
    tree GTY ((tag ("0"))) cloned_function;
    /* In a FUNCTION_DECL for which THUNK_P holds this is the
       THUNK_FIXED_OFFSET.  */
    HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
  } GTY ((desc ("%1.thunk_p"))) u5;
  union lang_decl_u3
  {
    struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
    struct language_function * GTY ((tag ("0")))
      saved_language_function;
  } GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces.  */
struct GTY(()) lang_decl_ns {
  struct lang_decl_base base;
  /* The binding level associated with the namespace (NAMESPACE_LEVEL).  */
  cp_binding_level *level;
  /* using directives and inline children.  These need to be va_gc,
     because of PCH.  */
  vec<tree, va_gc> *usings;
  vec<tree, va_gc> *inlinees;
  /* Hash table of bound decls.  It'd be nice to have this inline, but
     as the hash_map has a dtor, we can't then put this struct into a
     union (until moving to c++11).  */
  hash_table<named_decl_hash> *bindings;
};
/* DECL_LANG_SPECIFIC for parameters.  */
struct GTY(()) lang_decl_parm {
  struct lang_decl_base base;
  /* Declarator nesting depth, starting at 1 for parameters of the
     function itself (DECL_PARM_LEVEL).  */
  int level;
  /* Index of a user-declared parameter in its function, starting at 1;
     artificial parameters have index 0 (DECL_PARM_INDEX).  */
  int index;
};
/* Additional DECL_LANG_SPECIFIC information for structured bindings.  */
struct GTY(()) lang_decl_decomp {
  struct lang_decl_min min;
  /* The artificial underlying "e" variable of the structured binding
     variable.  (Accessor macro not visible in this chunk; presumably
     DECL_DECOMP_BASE — confirm.)  */
  tree base;
};
/* DECL_LANG_SPECIFIC for all types.  It would be nice to just make this a
   union rather than a struct containing a union as its only field, but
   tree.h declares it as a struct.  */
struct GTY(()) lang_decl {
  /* The active member is selected by base.selector; each GTY tag names
     the corresponding enum lang_decl_selector enumerator.  */
  union GTY((desc ("%h.base.selector"))) lang_decl_u {
    /* Nothing of only the base type exists.  */
    struct lang_decl_base GTY ((default)) base;
    struct lang_decl_min GTY((tag ("lds_min"))) min;
    struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
    struct lang_decl_ns GTY((tag ("lds_ns"))) ns;
    struct lang_decl_parm GTY((tag ("lds_parm"))) parm;
    struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp;
  } u;
};
/* Looks through a template (if present) to find what it declares. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)

/* Checked accessors for the variants of DECL_LANG_SPECIFIC.  Each
   statement-expression verifies that NODE really carries the requested
   variant and aborts via lang_check_failed otherwise, then yields the
   address of the union member (&lt->u.xxx).  The unchecked #else
   versions below compile to plain member accesses.  */

#define LANG_DECL_MIN_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!LANG_DECL_HAS_MIN (NODE))				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.min; })

/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
   template, not just on a FUNCTION_DECL.  So when looking for things in
   lang_decl_fn, look down through a TEMPLATE_DECL into its result.  */
#define LANG_DECL_FN_CHECK(NODE) __extension__				\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE));	\
   if (!DECL_DECLARES_FUNCTION_P (NODE)					\
       || lt->u.base.selector != lds_fn)				\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);		\
   &lt->u.fn; })

#define LANG_DECL_NS_CHECK(NODE) __extension__			\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (TREE_CODE (NODE) != NAMESPACE_DECL			\
       || lt->u.base.selector != lds_ns)			\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.ns; })

#define LANG_DECL_PARM_CHECK(NODE) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (TREE_CODE (NODE) != PARM_DECL				\
       || lt->u.base.selector != lds_parm)			\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.parm; })

#define LANG_DECL_DECOMP_CHECK(NODE) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!VAR_P (NODE)						\
       || lt->u.base.selector != lds_decomp)			\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.decomp; })

#define LANG_DECL_U2_CHECK(NODE, TF) __extension__		\
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE);		\
   if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF)	\
     lang_check_failed (__FILE__, __LINE__, __FUNCTION__);	\
   &lt->u.min.u2; })

#else

#define LANG_DECL_MIN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min)

#define LANG_DECL_FN_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)

#define LANG_DECL_NS_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.ns)

#define LANG_DECL_PARM_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.parm)

#define LANG_DECL_DECOMP_CHECK(NODE) \
  (&DECL_LANG_SPECIFIC (NODE)->u.decomp)

/* TF is deliberately unused in the unchecked variant.  */
#define LANG_DECL_U2_CHECK(NODE, TF) \
  (&DECL_LANG_SPECIFIC (NODE)->u.min.u2)

#endif /* ENABLE_TREE_CHECKING */
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
IDENTIFIER_CTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
IDENTIFIER_DTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
   specialized in-charge destructor, in-charge deleting destructor,
   or the base destructor.  */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (!(TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\
; \
else \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))
/* The type to which conversion operator FN converts to. */
#define DECL_CONV_FN_TYPE(FN) \
TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true)
/* True iff decl NODE is for an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
IDENTIFIER_ANY_OP_P (DECL_NAME (NODE))
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE))
/* NODE is a function_decl for an overloaded operator. Return its
compressed (raw) operator code. Note that this is not a TREE_CODE. */
#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \
(LANG_DECL_FN_CHECK (NODE)->ovl_op_code)
/* DECL is an overloaded operator. Test whether it is for TREE_CODE
(a literal constant). */
#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \
(DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE)
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_or_tls)
/* Nonzero if the thread-local variable was declared with __thread as
opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
#define SET_DECL_GNU_TLS_P(NODE) \
(retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \
DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the constructor it inherits from. */
#define DECL_INHERITED_CTOR(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* And this is the base that constructor comes from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_INHERITED_CTOR (NODE) \
? DECL_CONTEXT (flag_new_inheriting_ctors \
? strip_inheriting_ctors (NODE) \
: DECL_INHERITED_CTOR (NODE)) \
: NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
// True if NODE was declared as 'concept'. The flag implies that the
// declaration is constexpr, that the declaration cannot be specialized or
// refined, and that the result type must be convertible to bool.
#define DECL_DECLARED_CONCEPT_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__"))
/* Nonzero if the variable was declared to be thread-local.
We need a special C++ version of this test because the middle-end
DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
templates. */
#define CP_DECL_THREAD_LOCAL_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f () { ... } };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* The nesting depth of namespace, class or function. Makes is_ancestor much
simpler. Only 8 bits available. */
#define SCOPE_DEPTH(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)
/* Whether the namespace is an inline namespace. */
#define DECL_NAMESPACE_INLINE_P(NODE) \
TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, a vector of using directives. */
#define DECL_NAMESPACE_USING(NODE) \
(LANG_DECL_NS_CHECK (NODE)->usings)
/* In a NAMESPACE_DECL, a vector of inline namespaces. */
#define DECL_NAMESPACE_INLINEES(NODE) \
(LANG_DECL_NS_CHECK (NODE)->inlinees)
/* Pointer to hash_map from IDENTIFIERS to DECLS */
#define DECL_NAMESPACE_BINDINGS(NODE) \
(LANG_DECL_NS_CHECK (NODE)->bindings)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Non zero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Non zero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e, a type
which declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a lambda capture proxy, its captured variable. */
#define DECL_CAPTURED_VARIABLE(NODE) \
(LANG_DECL_U2_CHECK (NODE, 0)->access)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)))
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias
templateness of NODE. It'd be nice if this could unconditionally
access the slot, rather than return NULL if given a
non-templatable type. */
#define TYPE_TEMPLATE_INFO(NODE) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| RECORD_OR_UNION_TYPE_P (NODE) \
? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE)
/* Template information (if any) for an alias type. */
#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE)
/* If NODE is a type alias, this accessor returns the template info
for the alias template (if any). Otherwise behave as
TYPE_TEMPLATE_INFO. */
#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
(TYPE_ALIAS_P (NODE) \
? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
: TYPE_TEMPLATE_INFO (NODE))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#if CHECKING_P
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template arguments is actually a vector of vectors,
   rather than just a vector: i.e. the TREE_VEC's first element is
   itself a TREE_VEC.  NODE is parenthesized so that an arbitrary
   expression argument expands safely.  */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
((NODE) && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not the S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
   EXPR_PACK_EXPANSION.  Wrapped in do { } while (0) so the expansion
   is a single statement: the bare if/else form mis-parses when used
   in an unbraced if/else in the caller (dangling-else hazard).
   VALUE is parenthesized against low-precedence expression args.  */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
do \
{ \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = (VALUE); \
else \
TREE_OPERAND (NODE, 0) = (VALUE); \
} \
while (0)
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations.
If this is a TREE_LIST, the TREE_VALUE of the first element is the
usual template argument TREE_VEC, and the TREE_PURPOSE of later elements
are enclosing functions that provided function parameter packs we'll need
to map appropriately. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAX_VALUE_RAW (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* True iff this pack expansion is for sizeof.... */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* True iff the wildcard can match a template parameter pack. */
#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack.  VALUE must be a
   TREE_VEC.  Wrapped in do { } while (0) so the expansion is a single
   statement: the bare if/else form mis-parses when used in an
   unbraced if/else in the caller (dangling-else hazard).  VALUE is
   parenthesized against low-precedence expression args.  */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
do \
{ \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = (VALUE); \
else \
TREE_OPERAND (NODE, 0) = (VALUE); \
} \
while (0)
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
#define FOLD_EXPR_CHECK(NODE) \
TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
#define BINARY_FOLD_EXPR_CHECK(NODE) \
TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
/* True if NODE is UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR */
#define FOLD_EXPR_P(NODE) \
(TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)
/* True when NODE is a fold over a compound assignment operator. */
#define FOLD_EXPR_MODIFY_P(NODE) \
TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))
/* An INTEGER_CST containing the tree code of the folded operator. */
#define FOLD_EXPR_OP(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)
/* The expression containing an unexpanded parameter pack. */
#define FOLD_EXPR_PACK(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)
/* In a binary fold expression, the argument with no unexpanded
parameter packs. */
#define FOLD_EXPR_INIT(NODE) \
TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
#define CALL_OR_AGGR_INIT_CHECK(NODE) \
TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in left-to-right
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in right-to-left
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if CALL_EXPR was written as an operator expression, not a function
call. */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or
an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some
of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero means that the call is the jump from a thunk to the
thunked-to function. */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
(AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in a
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state.  Initialize with
   init_aggr_init_expr_arg_iterator (or first_aggr_init_expr_arg) and
   advance with next_aggr_init_expr_arg; see also the
   FOR_EACH_AGGR_INIT_EXPR_ARG convenience macro below.  */
struct aggr_init_expr_arg_iterator {
tree t; /* the AGGR_INIT_EXPR being iterated over */
int n; /* total argument count, from aggr_init_expr_nargs */
int i; /* index of the next argument to return */
};
/* Prepare the abstract argument list iterator ITER to walk the
   arguments of the AGGR_INIT_EXPR node EXP, starting at the first
   argument.  */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
				  aggr_init_expr_arg_iterator *iter)
{
  iter->i = 0;
  iter->n = aggr_init_expr_nargs (exp);
  iter->t = exp;
}
/* Return the next argument from abstract argument list iterator object ITER,
   and advance its state.  Return NULL_TREE if there are no more arguments.  */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
  /* Exhausted?  */
  if (iter->i >= iter->n)
    return NULL_TREE;
  /* Fetch the current argument before stepping the cursor.  */
  tree arg = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
  ++iter->i;
  return arg;
}
/* Initialize the abstract argument list iterator object ITER from the
   AGGR_INIT_EXPR EXP, then advance past and return the first argument
   (NULL_TREE if EXP has none).  Useful in for expressions, e.g.
     for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
          arg = next_aggr_init_expr_arg (&iter))  */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
  init_aggr_init_expr_arg_iterator (exp, iter);
  tree first = next_aggr_init_expr_arg (iter);
  return first;
}
/* Test whether abstract argument list iterator ITER still has
   arguments remaining, without changing its state.  */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
  return iter->i < iter->n;
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'break' stmts. */
#define LABEL_DECL_BREAK(NODE) \
DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
#define LABEL_DECL_CONTINUE(NODE) \
DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'return' stmts
in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */
#define LABEL_DECL_CDTOR(NODE) \
DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Is DECL NODE a hidden name? */
#define DECL_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \
&& DECL_ANTICIPATED (NODE))
/* True if this is a hidden class type. */
#define TYPE_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
&& DECL_ANTICIPATED (TYPE_NAME (NODE)))
/* True for artificial decls added for OpenMP privatized non-static
data members. */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if NODE is a VAR_DECL which has been declared inline. */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
: false)
/* Record that NODE was declared inline. Unlike the predicate above,
this does not guard against a null DECL_LANG_SPECIFIC; the caller
must ensure the lang-specific data has already been allocated. */
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
= true)
/* True if NODE is a constant variable with a value-dependent initializer. */
#define DECL_DEPENDENT_INIT_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))
/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
declaration or one of VAR_DECLs for the user identifiers in it. */
#define DECL_DECOMPOSITION_P(NODE) \
(VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
: false)
/* The underlying artificial VAR_DECL for structured binding. */
#define DECL_DECOMP_BASE(NODE) \
(LANG_DECL_DECOMP_CHECK (NODE)->base)
/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members
declared with constexpr specifier are implicitly inline variables. */
#define DECL_INLINE_VAR_P(NODE) \
(DECL_VAR_DECLARED_INLINE_P (NODE) \
|| (cxx_dialect >= cxx17 \
&& DECL_DECLARED_CONSTEXPR_P (NODE) \
&& DECL_CLASS_SCOPE_P (NODE)))
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration name. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
/* Set the flag indicating whether an ENUMERAL_TYPE is opaque --
presumably an opaque-enum-declaration, i.e. one declared without an
enumerator list (NOTE(review): confirm against finish_enum usage). */
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
/* Determines whether TYPE is an opaque enumeration type. */
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
late-specified return type. */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
(TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a CONSTRUCTOR with no elements that is not a
compound literal (TREE_HAS_CONSTRUCTOR unset). */
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty(CONSTRUCTOR_ELTS(NODE))\
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is a init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if an uninitialized element in NODE should not be treated as
implicitly value-initialized. Only used in constexpr evaluation. */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
(TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR should not be used as a variable initializer
because it was loaded from a constexpr variable with mutable fields. */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
(TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather
than C++11 functional cast syntax. */
#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \
(TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */
#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
(TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))
/* True if NODE is a brace-enclosed initializer used as a
direct-initializer, i.e. B b{1,2} (see CONSTRUCTOR_IS_DIRECT_INIT). */
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* True if NODE represents a dependent conversion of a non-type template
argument. Set by maybe_convert_nontype_argument. */
#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \
(TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type can not be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is no trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function type. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function type. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a pointer to member function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
cp_type_quals (NODE)))
/* As above, but can be used in places that want an lvalue at the expense
of not necessarily having the correct cv-qualifiers. */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)' */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* The canonical internal RECORD_TYPE from the POINTER_TYPE to
METHOD_TYPE. */
#define TYPE_PTRMEMFUNC_TYPE(NODE) \
TYPE_LANG_SLOT_1 (NODE)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) \
(((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_REF_CAPTURE(NODE) \
TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for PARM_DECL node means that this is an array function
parameter, i.e, a[] rather than *a. */
#define DECL_ARRAY_PARAMETER_P(NODE) \
DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
/* Nonzero for a FIELD_DECL whose NSDMI is currently being
instantiated. */
#define DECL_INSTANTIATING_NSDMI_P(NODE) \
DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
/* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */
#define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is a INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_PARMS(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
/* For function, method, class-data templates.
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_RESULT(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Nonzero for a raw template parameter node. */
#define TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_PARM_INDEX)
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, called the injected-class-name, in the scope of `S' itself, so that
you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* True if NODE is any kind of instantiation: per the DECL_USE_TEMPLATE
encoding above, the low bit is set for both implicit (1) and
explicit (3) instantiations. */
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
/* Like DECL_TEMPLATE_INSTANTIATION, but for class types. */
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
/* True if NODE is a (partial or explicit) specialization (value 2 in
the DECL_USE_TEMPLATE encoding above). */
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \
&& !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(!processing_template_parmlist \
&& processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) \
&& (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function, (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while
building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */
#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
/* SWITCH_STMT accessors.  These give access to the condition, body,
   original (pre-conversion) type of the condition, and scope of the
   switch statement, respectively.  */
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* True if there are case labels for all possible values of switch cond, either
because there is a default: case label or because the case label ranges cover
all values. */
#define SWITCH_STMT_ALL_CASES_P(NODE) \
TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE))
/* True if the body of a switch stmt contains no BREAK_STMTs. */
#define SWITCH_STMT_NO_BREAK_P(NODE) \
TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE))
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
the initializer has void type, it's doing something more complicated. */
#define SIMPLE_TARGET_EXPR_P(NODE) \
(TREE_CODE (NODE) == TARGET_EXPR \
&& !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* True if the ALIGNOF_EXPR was spelled "alignof". */
#define ALIGNOF_EXPR_STD_P(NODE) \
TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE))
/* An enumeration of the kind of tags that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type, /* "typename" types. */
scope_type /* namespace or tagged type name followed by :: */
};
/* The various kinds of lvalues we distinguish. */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
clk_class = 4, /* A prvalue of class or array type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
/* Various kinds of template specialization, instantiation, etc. */
enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
};
/* The various kinds of access. BINFO_ACCESS depends on these being
two bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
};
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_deduction_guide, /* A class template deduction guide. */
sfk_inheriting_constructor /* An inheriting constructor */
};
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
};
/* The kinds of storage duration an entity can have, mirroring the
   classification in [basic.stc].  */
enum duration_kind {
dk_static, /* Static storage duration.  */
dk_thread, /* Thread storage duration.  */
dk_auto, /* Automatic storage duration.  */
dk_dynamic /* Dynamic storage duration.  */
};
/* Bitmask flags to control type substitution. */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
tf_fndecl_type = 1 << 9, /* Substituting the type of a function
declaration. */
tf_no_cleanup = 1 << 10, /* Do not build a cleanup
(build_target_expr and friends) */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing. */
enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
};
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed. */
enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
};
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
PARM_DECLs in cp_tree_equal. */
extern int comparing_specializations;
/* In parser.c. */
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. This is a count since operands to
sizeof can be nested. */
extern int cp_unevaluated_operand;
/* RAII class used to inhibit the evaluation of operands during parsing
and template instantiation. Evaluation warnings are also inhibited. */
struct cp_unevaluated
{
/* Enter an unevaluated context (presumably saving/bumping
   cp_unevaluated_operand and the warning-suppression state; the
   definitions live elsewhere -- confirm in the .c file).  */
cp_unevaluated ();
/* Leave the unevaluated context, restoring the previous state.  */
~cp_unevaluated ();
};
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
enum unification_kind_t {
DEDUCE_CALL,
DEDUCE_CONV,
DEDUCE_EXACT
};
// An RAII class used to create a new pointer map for local
// specializations. When the stack goes out of scope, the
// previous pointer map is restored.
enum lss_policy { lss_blank, lss_copy };
struct local_specialization_stack
{
/* Install a new local-specializations map per POLICY.  */
local_specialization_stack (lss_policy = lss_blank);
/* Restore the previous map saved in SAVED.  */
~local_specialization_stack ();
/* The pointer map that was current before this stack was created;
   restored by the destructor.  */
hash_map<tree, tree> *saved;
};
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* in decl.c */
/* An array of static vars & fns. */
extern GTY(()) vec<tree, va_gc> *static_decls;
/* An array of vtable-needing types that have no key function, or have
an emitted key function. */
extern GTY(()) vec<tree, va_gc> *keyed_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
doesn't allow '.' in symbol names. */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), \
UDLIT_OP_ANSI_PREFIX, \
sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
(IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.
Two if we're done with front-end processing. */
extern int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
extern bool defer_mangling_aliases;
/* True if noexcept is part of the type (i.e. in C++17). */
extern bool flag_noexcept_type;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
/* These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* We're trying to treat an lvalue as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions. */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation. */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_FOLD 128
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* For a C++17 class deduction placeholder, the template it represents. */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
(DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))
/* Contexts in which auto deduction occurs. These flags are
used to control diagnostics in do_auto_deduction. */
enum auto_deduction_context
{
adc_unspecified, /* Not given */
adc_variable_type, /* Variable initializer deduction */
adc_return_type, /* Return type deduction */
adc_unify, /* Template argument deduction */
adc_requirement, /* Argument deduction constraint */
adc_decomp_type /* Decomposition declaration initializer deduction */
};
/* True if this type-parameter belongs to a class template, used by C++17
class template argument deduction. */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
#define AUTO_IS_DECLTYPE(NODE) \
(TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults.
TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
arguments for a function template specialization.
TFF_POINTER: we are printing a pointer type. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
#define TFF_POINTER (1 << 14)
/* Returns the TEMPLATE_DECL associated to a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Various flags for the overloaded operator information.
   NOTE(review): OVL_OP_FLAG_DELETE and OVL_OP_FLAG_VEC share the
   numeric values of OVL_OP_FLAG_UNARY and OVL_OP_FLAG_BINARY;
   presumably they form a separate flag namespace qualifying
   OVL_OP_FLAG_ALLOC operators (new/delete) — confirm at use sites.  */
enum ovl_op_flags
{
  OVL_OP_FLAG_NONE = 0,	  /* Don't care.  */
  OVL_OP_FLAG_UNARY = 1,  /* Is unary.  */
  OVL_OP_FLAG_BINARY = 2, /* Is binary.  */
  OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary (UNARY | BINARY).  */
  OVL_OP_FLAG_ALLOC = 4,  /* operator new or delete.  */
  OVL_OP_FLAG_DELETE = 1, /* operator delete (only with ALLOC).  */
  OVL_OP_FLAG_VEC = 2	  /* vector new or delete (only with ALLOC).  */
};
/* Compressed operator codes.  Order is determined by operators.def
   and does not match that of tree_codes.  The DEF_OPERATOR x-macro
   expands one enumerator per operator listed in operators.def;
   assignment operators are deliberately skipped here.  */
enum ovl_op_code
{
  OVL_OP_ERROR_MARK,
  OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
  OVL_OP_MAX
};
/* Descriptor for one overloadable operator: its identifier, printable
   and mangled names, and both the regular and compressed codes.
   Packed into bitfields since tables of these are kept (see
   ovl_op_info below).  */
struct GTY(()) ovl_op_info_t {
  /* The IDENTIFIER_NODE for the operator.  */
  tree identifier;
  /* The name of the operator.  */
  const char *name;
  /* The mangled name of the operator.  */
  const char *mangled_name;
  /* The (regular) tree code.  */
  enum tree_code tree_code : 16;
  /* The (compressed) operator code.  */
  enum ovl_op_code ovl_op_code : 8;
  /* The ovl_op_flags of the operator.  */
  unsigned flags : 8;
};
/* Overloaded operator info indexed by ass_op_p & ovl_op_code. */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes. */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary. */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];
/* Given an ass_op_p boolean and a tree code, return a pointer to its
overloaded operator info. Tree codes for non-overloaded operators
map to the error-operator. */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE) \
(&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
IDENTIFIER_OVL_OP_P is true. */
#define IDENTIFIER_OVL_OP_INFO(NODE) \
(&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
(IDENTIFIER_OVL_OP_INFO (NODE)->flags)
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
   constants. */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
   There is a VIRT_SPEC value for each virt-specifier.
   They can be combined by bitwise-or to form the complete set of
   virt-specifiers for a member function.  */
enum virt_specifier
{
  VIRT_SPEC_UNSPECIFIED = 0x0,
  VIRT_SPEC_FINAL       = 0x1,	/* `final' was given.  */
  VIRT_SPEC_OVERRIDE    = 0x2	/* `override' was given.  */
};
/* A virt-specifier, or bitmask thereof, using the VIRT_SPEC
   constants. */
typedef int cp_virt_specifiers;
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:

   [dcl.fct]
   The return type, the parameter-type-list, the ref-qualifier, and
   the cv-qualifier-seq, but not the default arguments or the exception
   specification, are part of the function type.

   REF_QUAL_NONE    Ordinary member function with no ref-qualifier
   REF_QUAL_LVALUE  Member function with the &-ref-qualifier
   REF_QUAL_RVALUE  Member function with the &&-ref-qualifier  */
enum cp_ref_qualifier {
  REF_QUAL_NONE = 0,
  REF_QUAL_LVALUE = 1,
  REF_QUAL_RVALUE = 2
};
/* A storage class.  */
enum cp_storage_class {
  /* sc_none must be zero so that zeroing a cp_decl_specifier_seq
     sets the storage_class field to sc_none.  */
  sc_none = 0,
  sc_auto,
  sc_register,
  sc_static,
  sc_extern,
  sc_mutable
};
/* An individual decl-specifier.  This is used to index the array of
   locations for the declspecs in struct cp_decl_specifier_seq
   below.  */
enum cp_decl_spec {
  ds_first,
  ds_signed = ds_first,		/* ds_signed shares the value of ds_first.  */
  ds_unsigned,
  ds_short,
  ds_long,
  ds_const,
  ds_volatile,
  ds_restrict,
  ds_inline,
  ds_virtual,
  ds_explicit,
  ds_friend,
  ds_typedef,
  ds_alias,
  ds_constexpr,
  ds_complex,
  ds_thread,
  ds_type_spec,
  ds_redefined_builtin_type_spec,
  ds_attribute,
  ds_std_attribute,
  ds_storage_class,
  ds_long_long,
  ds_concept,
  ds_last /* This enumerator must always be the last one.  */
};
/* A decl-specifier-seq.  */
struct cp_decl_specifier_seq {
  /* An array of locations for the declaration specifiers, indexed by
     enum cp_decl_spec.  */
  source_location locations[ds_last];
  /* The primary type, if any, given by the decl-specifier-seq.
     Modifiers, like "short", "const", and "unsigned" are not
     reflected here.  This field will be a TYPE, unless a typedef-name
     was used, in which case it will be a TYPE_DECL.  */
  tree type;
  /* The attributes, if any, provided with the specifier sequence.  */
  tree attributes;
  /* The C++11 attributes that follow the type specifier.  */
  tree std_attributes;
  /* If non-NULL, a built-in type that the user attempted to redefine
     to some other type.  */
  tree redefined_builtin_type;
  /* The storage class specified -- or sc_none if no storage class was
     explicitly specified.  */
  cp_storage_class storage_class;
  /* For the __intN declspec, this stores the index into the int_n_* arrays.  */
  int int_n_idx;
  /* True iff TYPE_SPEC defines a class or enum.  */
  BOOL_BITFIELD type_definition_p : 1;
  /* True iff multiple types were (erroneously) specified for this
     decl-specifier-seq.  */
  BOOL_BITFIELD multiple_types_p : 1;
  /* True iff multiple storage classes were (erroneously) specified
     for this decl-specifier-seq or a combination of a storage class
     with a typedef specifier.  */
  BOOL_BITFIELD conflicting_specifiers_p : 1;
  /* True iff at least one decl-specifier was found.  */
  BOOL_BITFIELD any_specifiers_p : 1;
  /* True iff at least one type-specifier was found.  */
  BOOL_BITFIELD any_type_specifiers_p : 1;
  /* True iff "int" was explicitly provided.  */
  BOOL_BITFIELD explicit_int_p : 1;
  /* True iff "__intN" was explicitly provided.  */
  BOOL_BITFIELD explicit_intN_p : 1;
  /* True iff "char" was explicitly provided.  */
  BOOL_BITFIELD explicit_char_p : 1;
  /* True iff ds_thread is set for __thread, not thread_local.  */
  BOOL_BITFIELD gnu_thread_keyword_p : 1;
  /* True iff the type is a decltype.  */
  BOOL_BITFIELD decltype_p : 1;
};
/* The various kinds of declarators. */
enum cp_declarator_kind {
cdk_id,
cdk_function,
cdk_array,
cdk_pointer,
cdk_reference,
cdk_ptrmem,
cdk_decomp,
cdk_error
};
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed.  */
struct cp_parameter_declarator {
  /* The next parameter, or NULL if none.  (This is a plain struct
     pointer, not a tree, so NULL_TREE does not apply.)  */
  cp_parameter_declarator *next;
  /* The decl-specifiers-seq for the parameter.  */
  cp_decl_specifier_seq decl_specifiers;
  /* The declarator for the parameter.  */
  cp_declarator *declarator;
  /* The default-argument expression, or NULL_TREE, if none.  */
  tree default_argument;
  /* True iff this is a template parameter pack.  */
  bool template_parameter_pack_p;
  /* Location within source.  */
  location_t loc;
};
/* A declarator.  Declarators nest via the DECLARATOR field; the KIND
   discriminates the U union below.  */
struct cp_declarator {
  /* The kind of declarator.  */
  ENUM_BITFIELD (cp_declarator_kind) kind : 4;
  /* Whether we parsed an ellipsis (`...') just before the declarator,
     to indicate this is a parameter pack.  */
  BOOL_BITFIELD parameter_pack_p : 1;
  /* If this declarator is parenthesized, this is the location of the
     open paren.  It is UNKNOWN_LOCATION when not parenthesized.  */
  location_t parenthesized;
  location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
			cdk_function.  */
  /* GNU attributes that apply to this declarator.  If the declarator
     is a pointer or a reference, these attributes apply to the type
     pointed to.  */
  tree attributes;
  /* Standard C++11 attributes that apply to this declarator.  If the
     declarator is a pointer or a reference, these attributes apply
     to the pointer, rather than to the type pointed to.  */
  tree std_attributes;
  /* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator.
     For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL.  */
  cp_declarator *declarator;
  union {
    /* For identifiers.  */
    struct {
      /* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
	 *_TYPE) for this identifier.  */
      tree qualifying_scope;
      /* The unqualified name of the entity -- an IDENTIFIER_NODE,
	 BIT_NOT_EXPR, or TEMPLATE_ID_EXPR.  */
      tree unqualified_name;
      /* If this is the name of a function, what kind of special
	 function (if any).  */
      special_function_kind sfk;
    } id;
    /* For functions.  */
    struct {
      /* The parameters to the function as a TREE_LIST of decl/default.  */
      tree parameters;
      /* The cv-qualifiers for the function.  */
      cp_cv_quals qualifiers;
      /* The virt-specifiers for the function.  */
      cp_virt_specifiers virt_specifiers;
      /* The ref-qualifier for the function.  */
      cp_ref_qualifier ref_qualifier;
      /* The transaction-safety qualifier for the function.  */
      tree tx_qualifier;
      /* The exception-specification for the function.  */
      tree exception_specification;
      /* The late-specified return type, if any.  */
      tree late_return_type;
      /* The trailing requires-clause, if any.  */
      tree requires_clause;
    } function;
    /* For arrays.  */
    struct {
      /* The bounds to the array.  */
      tree bounds;
    } array;
    /* For cdk_pointer and cdk_ptrmem.  */
    struct {
      /* The cv-qualifiers for the pointer.  */
      cp_cv_quals qualifiers;
      /* For cdk_ptrmem, the class type containing the member.  */
      tree class_type;
    } pointer;
    /* For cdk_reference.  */
    struct {
      /* The cv-qualifiers for the reference.  These qualifiers are
	 only used to diagnose ill-formed code.  */
      cp_cv_quals qualifiers;
      /* Whether this is an rvalue reference.  */
      bool rvalue_ref;
    } reference;
  } u;
};
/* A level of template instantiation.  Levels form a singly-linked,
   reference-counted chain (see NEXT and REFCOUNT below); the GTY
   chain_next annotation lets the garbage collector walk it
   iteratively.  */
struct GTY((chain_next ("%h.next"))) tinst_level {
  /* The immediately deeper level in the chain.  */
  struct tinst_level *next;

  /* The original node.  TLDCL can be a DECL (for a function or static
     data member), a TYPE (for a class), depending on what we were
     asked to instantiate, or a TREE_LIST with the template as PURPOSE
     and the template args as VALUE, if we are substituting for
     overload resolution.  In all these cases, TARGS is NULL.
     However, to avoid creating TREE_LIST objects for substitutions if
     we can help it, we store PURPOSE and VALUE in TLDCL and TARGS,
     respectively.  So TLDCL stands for TREE_LIST or DECL (the
     template is a DECL too), whereas TARGS stands for the template
     arguments.  */
  tree tldcl, targs;

 private:
  /* Return TRUE iff the original node is a split list (i.e. TARGS is
     set, so TLDCL holds the template and TARGS the arguments).  */
  bool split_list_p () const { return targs; }

  /* Return TRUE iff the original node is a TREE_LIST object.  */
  bool tree_list_p () const
  {
    return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
  }

  /* Return TRUE iff the original node is not a list, split or not.  */
  bool not_list_p () const
  {
    return !split_list_p () && !tree_list_p ();
  }

  /* Convert (in place) the original node from a split list to a
     TREE_LIST.  */
  tree to_list ();

 public:
  /* Release storage for OBJ and node, if it's a TREE_LIST.  */
  static void free (tinst_level *obj);

  /* Return TRUE iff the original node is a list, split or not.  */
  bool list_p () const { return !not_list_p (); }

  /* Return the original node; if it's a split list, make it a
     TREE_LIST first, so that it can be returned as a single tree
     object.  */
  tree get_node () {
    if (!split_list_p ()) return tldcl;
    else return to_list ();
  }

  /* Return the original node if it's a DECL or a TREE_LIST, but do
     NOT convert a split list to a TREE_LIST: return NULL instead.  */
  tree maybe_get_node () const {
    if (!split_list_p ()) return tldcl;
    else return NULL_TREE;
  }

  /* The location where the template is instantiated.  */
  location_t locus;

  /* errorcount + sorrycount when we pushed this level.  */
  unsigned short errors;

  /* Count references to this object.  If refcount reaches
     refcount_infinity value, we don't increment or decrement the
     refcount anymore, as the refcount isn't accurate anymore.
     The object can still be garbage collected if unreferenced from
     anywhere, which might keep referenced objects referenced longer than
     otherwise necessary.  Hitting the infinity is rare though.  */
  unsigned short refcount;

  /* Infinity value for the above refcount.  */
  static const unsigned short refcount_infinity = (unsigned short) ~0;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE.  */
inline tree
type_of_this_parm (const_tree fntype)
{
  /* Only member function types have an implicit object parameter.  */
  gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
  function_args_iterator it;
  function_args_iter_init (&it, fntype);
  /* The first argument of a METHOD_TYPE is the `this' parameter.  */
  return function_args_iter_cond (&it);
}
/* Return the class of the `this' parameter of FNTYPE.  */
inline tree
class_of_this_parm (const_tree fntype)
{
  tree this_type = type_of_this_parm (fntype);
  return TREE_TYPE (this_type);
}
/* True iff T is a variable template declaration: a primary
   TEMPLATE_DECL whose result is a variable.  */
inline bool
variable_template_p (tree t)
{
  if (TREE_CODE (t) == TEMPLATE_DECL && PRIMARY_TEMPLATE_P (t))
    {
      tree result = DECL_TEMPLATE_RESULT (t);
      return result && VAR_P (result);
    }
  return false;
}
/* True iff T is a variable concept definition.  That is, T is
   a variable template declared with the concept specifier.  */
inline bool
variable_concept_p (tree t)
{
  if (TREE_CODE (t) == TEMPLATE_DECL)
    {
      tree result = DECL_TEMPLATE_RESULT (t);
      if (result)
	return VAR_P (result) && DECL_DECLARED_CONCEPT_P (result);
    }
  return false;
}
/* True iff T is a concept definition.  That is, T is a variable or
   function template declared with the concept specifier.  */
inline bool
concept_template_p (tree t)
{
  if (TREE_CODE (t) == TEMPLATE_DECL)
    {
      tree result = DECL_TEMPLATE_RESULT (t);
      if (result)
	return (VAR_OR_FUNCTION_DECL_P (result)
		&& DECL_DECLARED_CONCEPT_P (result));
    }
  return false;
}
/* A parameter list indicating a function with no parameters,
   e.g. "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* Various dump ids. */
extern int class_dump_id;
extern int raw_dump_id;
/* in call.c */
extern bool check_dtor_name (tree, tree);
int magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree extract_call_expr (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **,
tree *, tree *, tree, tree,
tree *, tsubst_flags_t);
extern tree build_new_method_call (tree, tree,
vec<tree, va_gc> **, tree,
int, tree *, tsubst_flags_t);
extern tree build_special_member_call (tree, tree,
vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool aligned_allocation_fn_p (tree);
extern bool usual_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern location_t get_fndecl_argument_location (tree, int);
/* A class for recording information about access failures (e.g. private
   fields), so that we can potentially supply a fix-it hint about
   an accessor (from a context in which the constness of the object
   is known).  */
class access_failure_info
{
 public:
  /* Initially no failure has been recorded.  */
  access_failure_info () : m_was_inaccessible (false), m_basetype_path (NULL_TREE),
    m_field_decl (NULL_TREE) {}

  /* Remember that FIELD_DECL (looked up via BASETYPE_PATH) was
     inaccessible.  */
  void record_access_failure (tree basetype_path, tree field_decl);
  /* If a failure was recorded, possibly emit a fix-it hint suggesting
     an accessor; CONST_P says whether the object was const.  */
  void maybe_suggest_accessor (bool const_p) const;

 private:
  /* Whether record_access_failure has been called.  */
  bool m_was_inaccessible;
  /* The base-type path of the failed lookup, NULL_TREE if none.  */
  tree m_basetype_path;
  /* The inaccessible FIELD_DECL, NULL_TREE if none.  */
  tree m_field_decl;
};
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern int remaining_arguments (tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
extern void validate_conversion_obstack (void);
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern bool add_method (tree, tree, bool);
extern tree declared_access (tree);
extern tree currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern bool vptr_via_virtual_p (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool default_ctor_p (tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
extern bool classtype_has_non_deleted_move_ctor (tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern tree missing_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, bool);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree cp_get_callee (tree);
extern tree cp_get_callee_fndecl (tree);
extern tree cp_get_callee_fndecl_nofold (tree);
extern tree cp_get_fndecl_from_callee (tree, bool fold = true);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern bool can_convert_qual (tree, tree);
extern tree perform_qualification_conversions (tree, tree);
extern bool tx_safe_fn_type_p (tree);
extern tree tx_unsafe_fn_variant (tree);
extern bool fnptr_conv_p (tree, tree);
extern tree strip_fnptr_conv (tree);
/* in name-lookup.c */
extern void maybe_push_cleanup_level (tree);
extern tree make_anon_name (void);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern void note_break_stmt (void);
extern bool note_iteration_stmt_body_start (void);
extern void note_iteration_stmt_body_end (bool);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree, bool = true);
extern bool maybe_version_functions (tree, tree, bool);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree build_typename_type (tree, tree, tree, tag_types);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern tree lookup_decomp_type (tree);
extern void cp_maybe_mangle_decomp (tree, tree, unsigned int);
extern void cp_finish_decomp (tree, tree, unsigned int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, tree, cp_cv_quals);
extern tree grokparms (tree, tree *);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern bool grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern void xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (bool);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
extern int wrapup_namespace_globals ();
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern bool is_direct_enum_init (tree, tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
extern bool check_array_designated_initializer (constructor_elt *,
unsigned HOST_WIDE_INT);
extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
/* in decl2.c */
extern void record_mangling (tree, bool);
extern void overwrite_mangling (tree, tree);
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree, tree);
extern bool any_dependent_type_attributes_p (tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cxx_post_compilation_parsing_cleanups (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_defined_p (tree);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree, bool);
extern tree set_guard (tree);
extern tree get_tls_wrapper_fn (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_variable_template_instantiation (tree);
extern tree build_artificial_parm (tree, tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree,tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_use (tree expr, bool rvalue_p, bool read_p,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_rvalue_use (tree,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_lvalue_use (tree);
extern tree mark_lvalue_use_nonread (tree);
extern tree mark_type_use (tree);
extern tree mark_discarded_use (tree);
extern void mark_exp_read (tree);
/* in friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree,
enum overload_flags, bool);
extern void set_global_friend (tree);
extern bool is_global_friend (tree);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool, tsubst_flags_t);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern bool type_has_new_extended_alignment (tree);
extern unsigned malloc_alignment (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree scalar_constant_value (tree);
extern tree decl_really_constant_value (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
extern bool maybe_reject_flexarray_init (tree, tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree,
location_t = UNKNOWN_LOCATION);
extern tree unqualified_fn_lookup_error (cp_expr);
extern tree make_conv_op_name (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern void fit_decomposition_lang_decl (tree, tree);
extern tree copy_decl (tree CXX_MEM_STAT_INFO);
extern tree copy_type (tree CXX_MEM_STAT_INFO);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern const char *get_identifier_kind_name (tree);
extern void set_identifier_kind (tree, cp_identifier_kind);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern tree forward_parm (tree);
extern bool is_trivially_xible (enum tree_code, tree, tree);
extern bool is_xible (enum tree_code, tree, tree);
extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error);
extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree strip_inheriting_ctors (tree);
extern tree inherited_ctor_binfo (tree);
extern bool ctor_omit_inherited_parms (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* in optimize.c */
extern bool maybe_clone_body (tree);
/* in parser.c */
extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool,
unsigned short);
extern bool parsing_nsdmi (void);
extern bool parsing_default_capturing_generic_lambda_in_template (void);
extern void inject_this_parameter (tree, cp_cv_quals);
extern location_t defarg_location (tree);
extern void maybe_show_extern_c_location (void);
/* in pt.c */
extern bool check_template_shadow (tree);
extern bool check_auto_in_tmpl_args (tree, tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern void check_unqualified_spec_or_inst (tree, location_t);
extern tree check_explicit_specialization (tree, tree, int, int,
tree = NULL_TREE);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree make_template_placeholder (tree);
extern bool template_placeholder_p (tree);
extern tree do_auto_deduction (tree, tree, tree,
tsubst_flags_t
= tf_warning_or_error,
auto_deduction_context
= adc_unspecified,
tree = NULL_TREE,
int = LOOKUP_NORMAL);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_parm_list (void);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern bool need_generic_capture (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error);
extern tree instantiate_decl (tree, bool, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool builtin_pack_fn_p (tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error);
extern bool check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree, tree * = NULL,
tree * = NULL, bool = false);
extern int template_args_equal (tree, tree, bool = false);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, int, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree tsubst_expr (tree, tree, tsubst_flags_t,
tree, bool);
extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation(void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool any_erroneous_template_args_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool type_dependent_object_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool instantiation_dependent_uneval_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level(void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
bool template_template_parameter_p (const_tree);
bool template_type_parameter_p (const_tree);
extern bool primary_template_specialization_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
extern tree coerce_template_parms (tree, tree, tree);
extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t);
extern void register_local_specialization (tree, tree);
extern tree retrieve_local_specialization (tree);
extern tree extract_fnparm_pack (tree, tree *);
extern tree template_parm_to_arg (tree);
extern tree dguide_name (tree);
extern bool dguide_name_p (tree);
extern bool deduction_guide_p (const_tree);
extern bool copy_guide_p (const_tree);
extern bool template_guide_p (const_tree);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field (tree, tree, int, bool);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern tree lookup_member_fuzzy (tree, tree, bool);
extern tree locate_field_accessor (tree, tree, bool);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
/* Magic sentinel value (not a valid tree pointer) used with the dfs_walk_*
   routines declared below; the name indicates it requests skipping the
   bases of the current binfo — confirm exact semantics in search.c.  */
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree binfo_via_virtual (tree, tree);
extern bool binfo_direct_p (tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
/* The representation of a single deferred access check.  Vectors of these
   are produced and consumed by the deferred-access-check routines declared
   below (get_deferred_access_checks, perform_access_checks, etc.).  */
struct GTY(()) deferred_access_check {
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
/* The location of this access. */
location_t loc;
};
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
/* RAII sentinel to ensure that deferred access checks are popped before
   a function returns.  */
struct deferring_access_check_sentinel
{
/* Push a new deferring context of KIND on construction.  */
deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred)
{
push_deferring_access_checks (kind);
}
/* Pop the deferring context when the sentinel leaves scope.  */
~deferring_access_check_sentinel ()
{
pop_deferring_access_checks ();
}
};
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool, unsigned short);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool, unsigned short);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);
/* Flags describing the kind of compound statement being started; passed
   to begin_compound_stmt (declared just below, taking unsigned int).
   The non-zero values are distinct bits, so presumably they may be
   OR'd together; BCS_NORMAL is the default.  */
enum {
BCS_NORMAL = 0,
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4,
BCS_TRANSACTION = 8
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern cp_expr finish_parenthesized_expr (cp_expr);
extern tree force_paren_expr (tree);
extern tree maybe_undo_parenthesized_ref (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
bool empty_expr_stmt_p (tree);
extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr,
tsubst_flags_t);
/* Whether this call to finish_compound_literal represents a C++11 functional
cast or a C99 compound literal. */
enum fcl_t { fcl_functional, fcl_c99 };
extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional);
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern bool outer_automatic_var_p (tree);
extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false);
extern cp_expr finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree, tsubst_flags_t);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree, tsubst_flags_t);
extern tree finish_offsetof (tree, tree, location_t);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern void finish_omp_declare_simd_methods (tree);
extern tree finish_omp_clauses (tree, enum c_omp_region_type);
extern tree push_omp_privatization_clauses (bool);
extern void pop_omp_privatization_clauses (tree);
extern void save_omp_privatization_clauses (vec<tree> &);
extern void restore_omp_privatization_clauses (vec<tree> &);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree finish_oacc_data (tree, tree);
extern tree finish_oacc_host_data (tree, tree);
extern tree finish_omp_construct (enum tree_code, tree, tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree, vec<tree> *, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree omp_privatize_field (tree, bool);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern bool is_constant_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, bool);
extern void maybe_generic_this_capture (tree, tree);
extern tree maybe_resolve_dummy (tree, bool);
extern tree current_nonlambda_function (void);
extern tree nonlambda_method_basetype (void);
extern tree current_nonlambda_scope (void);
extern tree current_lambda_expr (void);
extern bool generic_lambda_fn_p (tree);
extern tree do_dependent_capture (tree, bool = false);
extern bool lambda_fn_in_template_p (tree);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
extern bool lambda_static_thunk_p (tree);
extern tree finish_builtin_launder (location_t, tree,
tsubst_flags_t);
extern void start_lambda_scope (tree);
extern void record_lambda_scope (tree);
extern void record_null_lambda_scope (tree);
extern void finish_lambda_scope (void);
extern tree start_lambda_function (tree fn, tree lambda_expr);
extern void finish_lambda_function (tree body);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
extern int cp_tree_code_length (enum tree_code);
extern void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN
ATTRIBUTE_COLD;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool type_has_unique_obj_representations (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern void maybe_warn_parm_abi (tree, location_t);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree,
const_tree);
extern bool check_abi_tag_args (tree, tree);
extern tree strip_typedefs (tree, bool * = NULL);
extern tree strip_typedefs_expr (tree, bool * = NULL);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool glvalue_p (const_tree);
extern bool obvalue_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool bitfield_p (const_tree);
extern tree cp_stabilize_reference (tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...);
extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern bool vla_type_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
inline tree ovl_first (tree) ATTRIBUTE_PURE;
extern tree ovl_make (tree fn,
tree next = NULL_TREE);
extern tree ovl_insert (tree fn, tree maybe_ovl,
bool using_p = false);
extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE;
extern void lookup_mark (tree lookup, bool val);
extern tree lookup_add (tree fns, tree lookup);
extern tree lookup_maybe_add (tree fns, tree lookup,
bool deduping);
extern void lookup_keep (tree lookup, bool keep);
extern void lookup_list_keep (tree list, bool keep);
extern int is_overloaded_fn (tree) ATTRIBUTE_PURE;
extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE;
extern tree dependent_name (tree);
extern tree get_fns (tree) ATTRIBUTE_PURE;
extern tree get_first_fn (tree) ATTRIBUTE_PURE;
extern tree ovl_scope (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree canonical_eh_spec (tree);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree, bool = false);
extern tree build_ctor_subob_ref (tree, tree, tree);
extern tree replace_placeholders (tree, tree, bool * = NULL);
extern bool find_placeholders (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
/* Convenience wrapper: qualify TYPE with QUALS via
   cp_build_qualified_type_real, using the default tf_warning_or_error
   complaint flags.  */
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, hash_set<tree> *);
/* Walk the tree at *TP with FUNC/DATA/PSET, using the C++-aware subtree
   walker cp_walk_subtrees (declared above) instead of the default.  */
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
/* As cp_walk_tree, but via the duplicate-suppressing walker (no PSET).  */
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern tree cxx_copy_lang_qualifiers (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
extern void cp_warn_deprecated_use (tree);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree, bool = false);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
inline bool type_unknown_p (const_tree);
enum { ce_derived, ce_type, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qualification (int, int);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree,
tsubst_flags_t,
bool = true);
extern tree build_class_member_access_expr (cp_expr, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (cp_expr, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree cp_build_fold_indirect_ref (tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, cp_expr,
tsubst_flags_t);
extern tree cp_build_addressof (location_t, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, bool,
tsubst_flags_t);
extern tree genericize_compound_lvalue (tree);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern cp_expr build_c_cast (location_t loc, tree type,
cp_expr expr);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern cp_expr build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (location_t, tree,
tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
extern tree finish_left_unary_fold_expr (tree, int);
extern tree finish_right_unary_fold_expr (tree, int);
extern tree finish_binary_fold_expr (tree, tree, int);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (location_t, const_tree,
const_tree, diagnostic_t);
/* Convenience overload: report an incomplete-type diagnostic for VALUE,
   deriving the location from VALUE itself (falling back to the global
   input location when VALUE carries none).  */
inline void
cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
				diagnostic_t diag_kind)
{
  location_t loc = EXPR_LOC_OR_LOC (value, input_location);
  cxx_incomplete_type_diagnostic (loc, value, type, diag_kind);
}
extern void cxx_incomplete_type_error (location_t, const_tree,
const_tree);
/* Report an incomplete-type *error* for VALUE of TYPE; this is simply the
   generic diagnostic issued at DK_ERROR severity.  */
inline void
cxx_incomplete_type_error (const_tree value, const_tree type)
{
  cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
}
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
extern bool check_narrowing (tree, tree, tsubst_flags_t);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int, tsubst_flags_t);
extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree, tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
extern bool mangle_return_type_p (tree);
extern tree mangle_decomp (tree, vec<tree> &);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
extern bool cxx_omp_disregard_value_expr (tree, bool);
extern void cp_fold_function (tree);
extern tree cp_fully_fold (tree);
extern void clear_fold_cache (void);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree, bool);
extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree);
extern tree strip_using_decl (tree);
/* Tell the binding oracle what kind of binding we are looking for. */
enum cp_oracle_request
{
CP_ORACLE_IDENTIFIER
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier);
extern cp_binding_oracle_function *cp_binding_oracle;
/* in constraint.cc */
extern void init_constraint_processing ();
extern bool constraint_p (tree);
extern tree conjoin_constraints (tree, tree);
extern tree conjoin_constraints (tree);
extern tree get_constraints (tree);
extern void set_constraints (tree, tree);
extern void remove_constraints (tree);
extern tree current_template_constraints (void);
extern tree associate_classtype_constraints (tree);
extern tree build_constraints (tree, tree);
extern tree get_shorthand_constraints (tree);
extern tree build_concept_check (tree, tree, tree = NULL_TREE);
extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE);
extern tree make_constrained_auto (tree, tree);
extern void placeholder_extract_concept_and_args (tree, tree&, tree&);
extern bool equivalent_placeholder_constraints (tree, tree);
extern hashval_t hash_placeholder_constraint (tree);
extern bool deduce_constrained_parameter (tree, tree&, tree&);
extern tree resolve_constraint_check (tree);
extern tree check_function_concept (tree);
extern tree finish_template_introduction (tree, tree);
extern bool valid_requirements_p (tree);
extern tree finish_concept_name (tree);
extern tree finish_shorthand_constraint (tree, tree);
extern tree finish_requires_expr (tree, tree);
extern tree finish_simple_requirement (tree);
extern tree finish_type_requirement (tree);
extern tree finish_compound_requirement (tree, tree, bool);
extern tree finish_nested_requirement (tree);
extern void check_constrained_friend (tree, tree);
extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree);
extern bool function_concept_check_p (tree);
extern tree normalize_expression (tree);
extern tree expand_concept (tree, tree);
extern bool expanding_concept ();
extern tree evaluate_constraints (tree, tree);
extern tree evaluate_function_concept (tree, tree);
extern tree evaluate_variable_concept (tree, tree);
extern tree evaluate_constraint_expression (tree, tree);
extern bool constraints_satisfied_p (tree);
extern bool constraints_satisfied_p (tree, tree);
extern tree lookup_constraint_satisfaction (tree, tree);
extern tree memoize_constraint_satisfaction (tree, tree, tree);
extern tree lookup_concept_satisfaction (tree, tree);
extern tree memoize_concept_satisfaction (tree, tree, tree);
extern tree get_concept_expansion (tree, tree);
extern tree save_concept_expansion (tree, tree, tree);
extern bool* lookup_subsumption_result (tree, tree);
extern bool save_subsumption_result (tree, tree, bool);
extern bool equivalent_constraints (tree, tree);
extern bool equivalently_constrained (tree, tree);
extern bool subsumes_constraints (tree, tree);
extern bool strictly_subsumes (tree, tree);
extern int more_constrained (tree, tree);
extern void diagnose_constraints (location_t, tree, tree);
/* in logic.cc */
extern tree decompose_conclusions (tree);
extern bool subsumes (tree, tree);
/* In class.c */
extern void cp_finish_injected_record_type (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In constexpr.c */
extern void fini_constexpr (void);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool is_valid_constexpr_fn (tree, bool);
extern bool check_constexpr_ctor_body (tree, tree, bool);
extern tree constexpr_fn_retval (tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool is_constant_expression (tree);
extern bool is_nondependent_constant_expression (tree);
extern bool is_nondependent_static_init_expression (tree);
extern bool is_static_init_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_constant_expression (tree);
extern bool require_rvalue_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree, tree = NULL_TREE);
extern tree cxx_constant_init (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_init (tree, tree = NULL_TREE);
extern tree fold_non_dependent_expr (tree, tsubst_flags_t = tf_none);
extern tree fold_simple (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern bool is_instantiation_of_constexpr (tree);
extern bool var_in_constexpr_fn (tree);
extern bool var_in_maybe_constexpr_fn (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
extern tree fold_sizeof_expr (tree);
extern void clear_cv_and_fold_caches (void);
/* In cp-ubsan.c */
extern void cp_ubsan_maybe_instrument_member_call (tree);
extern void cp_ubsan_instrument_member_accesses (tree *);
extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);
/* Inline bodies. */
/* Walk down a chain of OVERLOAD nodes and return the first function
   (or other decl) in the overload set.  A non-OVERLOAD NODE is
   returned unchanged.  */
inline tree
ovl_first (tree node)
{
  for (; TREE_CODE (node) == OVERLOAD; node = OVL_FUNCTION (node))
    continue;
  return node;
}
/* True iff EXPR has the "unknown" type, i.e. its type cannot yet be
   determined (e.g. an unresolved overload set).  */
inline bool
type_unknown_p (const_tree expr)
{
  const_tree type = TREE_TYPE (expr);
  return type == unknown_type_node;
}
/* Hash a decl (or overload set) by the identifier it is bound to;
   anonymous entities hash to zero.  */
inline hashval_t
named_decl_hash::hash (const value_type decl)
{
  tree name = OVL_NAME (decl);
  if (!name)
    return 0;
  return IDENTIFIER_HASH_VALUE (name);
}
inline bool
named_decl_hash::equal (const value_type existing, compare_type candidate)
{
tree name = OVL_NAME (existing);
return candidate == name;
}
/* True iff EXPR is the magic __null constant, looking through any
   location wrapper node around it.  */
inline bool
null_node_p (const_tree expr)
{
  STRIP_ANY_LOCATION_WRAPPER (expr);
  return null_node == expr;
}
#if CHECKING_P
namespace selftest {
extern void run_cp_tests (void);
/* Declarations for specific families of tests within cp,
by source file, in alphabetical order. */
extern void cp_pt_c_tests ();
extern void cp_tree_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AddNoiseImage() returns a new image that is a copy of IMAGE with random
  noise of NOISE_TYPE, scaled by ATTENUATE, added to every updatable pixel
  channel.  On failure it returns NULL and records the problem in EXCEPTION.
  Each image row is processed independently (optionally in parallel) using a
  per-thread random generator.
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /* One RandomInfo per OpenMP thread, indexed by thread id.  */
  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated implementation when it succeeds.  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoTLS();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when no fixed random seed is in effect (key == ~0UL);
     presumably this keeps seeded runs reproducible -- row order matters.  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* A failure in any row aborts the remaining rows cheaply.  */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        /* Skip channels absent from either image.  */
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        /* Copy-only channels pass through unmodified.  */
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Rows may complete out of order under OpenMP; only the increment
           itself is atomic.  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoTLS(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  BlueShiftImage() returns a copy of IMAGE with its colors muted toward a
  moonlight look.  For each pixel it blends each RGB channel twice with
  FACTOR: first against the minimum of the pixel's R/G/B values, then against
  the maximum.  Returns NULL (with EXCEPTION set) on failure.
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    PixelInfo
      pixel;

    Quantum
      quantum;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    /* A failure in any row aborts the remaining rows cheaply.  */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Pass 1: quantum = min(R,G,B); average each channel with
         factor*min.  */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) < quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) < quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum);
      /* Pass 2: quantum = max(R,G,B); average the pass-1 result with
         factor*max.  */
      quantum=GetPixelRed(image,p);
      if (GetPixelGreen(image,p) > quantum)
        quantum=GetPixelGreen(image,p);
      if (GetPixelBlue(image,p) > quantum)
        quantum=GetPixelBlue(image,p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(shift_image,ClampToQuantum(pixel.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CharcoalImage() returns a new image that simulates a charcoal sketch of
  IMAGE: edges are detected with RADIUS, clamped, softened with a Gaussian
  blur of RADIUS/SIGMA, then the result is normalized, negated, and reduced
  to grayscale.  Returns NULL (with EXCEPTION set) on failure.
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edge_image,
    *sketch_image;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Highlight the edges, then soften them with a Gaussian blur.
  */
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  edge_image->alpha_trait=UndefinedPixelTrait;
  sketch_image=(Image *) NULL;
  status=ClampImage(edge_image,exception);
  if (status != MagickFalse)
    sketch_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (sketch_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Stretch contrast, invert, and convert to grayscale for the final
    charcoal rendering.
  */
  status=NormalizeImage(sketch_image,exception);
  if (status != MagickFalse)
    status=NegateImage(sketch_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=GrayscaleImage(sketch_image,image->intensity,exception);
  if (status == MagickFalse)
    sketch_image=DestroyImage(sketch_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorizeImage() returns a copy of IMAGE with the COLORIZE fill color
  blended into every pixel.  BLEND is a geometry string of per-channel
  percentages (rho/sigma/xi/psi[/chi]); a NULL BLEND returns the prepared
  clone unblended.  Returns NULL (with EXCEPTION set) on failure.
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag "Colorize/Image"
/* Linear blend: BLEND_PERCENTAGE percent of COLORIZE, the rest of PIXEL.  */
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  /* Per-channel blend percentages, stored as a PixelInfo.  */
  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /* A gray image (or gray fill) is promoted to sRGB so color blending can
     take effect.  */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  /* Enable alpha when the fill color has it but the image does not.  */
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);
  /*
    Parse BLEND: rho seeds all color channels; sigma/xi/psi (and chi for
    CMYK) override individual channels.  Alpha defaults to 0 (no blend).
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /* In CMYK the fourth value is black; alpha shifts to the fifth.  */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure in any row aborts the remaining rows cheaply.  */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
/*
  ColorMatrixImage() applies a color transformation matrix to every pixel of
  a clone of `image'.  Each output channel is a linear combination of the
  input R, G, B (and optionally K and A) channels plus a constant offset
  (column 6, scaled by QuantumRange).

  Parameters:
    image        - the source image (not modified).
    color_matrix - kernel holding up to 6x6 matrix values, row-major.
    exception    - receives any errors or warnings.

  Returns a newly allocated transformed image, or NULL on failure.
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =  /* identity transform; overwritten below from color_matrix */
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Copy the kernel values into the fixed 6x6 matrix; values beyond 6x6 are
    consumed from the kernel (i advances) but ignored.
  */
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /*
        Log the effective 6x6 matrix, one row per log line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        h;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      /* Only the first 6 matrix rows are ever applied. */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (h=0; h < (ssize_t) height; h++)
      {
        double
          sum;

        /*
          Row h of the matrix produces output channel h: weighted sum of
          R,G,B, plus K (CMYK only), plus A (if the image has alpha), plus
          the normalized offset in column 6.
        */
        sum=ColorMatrix[h][0]*GetPixelRed(image,p)+ColorMatrix[h][1]*
          GetPixelGreen(image,p)+ColorMatrix[h][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[h][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[h][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[h][5];
        switch (h)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ImplodeImage() returns a copy of `image' with pixels pulled toward (or
  pushed away from, for negative `amount') the image center, inside the
  largest centered ellipse.  Pixels outside the ellipse are copied verbatim.

  Parameters:
    image     - the source image (not modified).
    amount    - extent of the implosion.
    method    - pixel interpolation method used for the remapped pixels.
    exception - receives any errors or warnings.

  Returns a newly allocated imploded image, or NULL on failure.
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor.  Non-square images are scaled so the implosion
    region is an ellipse inscribed in the image; radius is half the shorter
    dimension in the scaled coordinate system.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double)
      canvas_image->rows);
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double)
          canvas_image->columns);
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /* Outside the ellipse: copy the source pixel channel-by-channel. */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the source at a radially remapped
            position; factor < 1 pulls toward the center for amount > 0.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)*PerceptibleReciprocal(radius)/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x*PerceptibleReciprocal(scale.x)+center.x),
            (double) (factor*delta.y*PerceptibleReciprocal(scale.y)+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImage method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  MorphImages() morphs an image sequence: between each adjacent pair of
  frames it inserts `number_frames' in-between images, each a size- and
  color-blended mix of the two neighbors.  A single-image list is simply
  replicated `number_frames' times.

  Parameters:
    image         - the image sequence (not modified).
    number_frames - number of in-between frames to generate per pair.
    exception     - receives any errors or warnings.

  Returns a newly allocated image list, or NULL on failure.  Ownership of
  the returned list passes to the caller.
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  const Image
    *next;

  ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  /*
    Initialize status before either branch; previously it was written in the
    single-image branch while still uninitialized.
  */
  status=MagickTrue;
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: replicate it number_frames times.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (n=0; n < (ssize_t) number_frames; n++)
    {
      CacheView
        *image_view,
        *morph_view;

      /* beta ramps from ~0 to ~1 across the in-between frames. */
      beta=(double) (n+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      /*
        Resize the current frame to the blended dimensions; this becomes
        the in-between frame.
      */
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
        GetNextImageInList(next)->rows+0.5),next->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      status=SetImageStorageClass(morph_image,DirectClass,exception);
      if (status == MagickFalse)
        {
          /* Fix: also release the accumulated list; it leaked before. */
          morph_image=DestroyImage(morph_image);
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /*
        Resize the next frame to the same blended dimensions so the two can
        be mixed pixel-for-pixel.
      */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(morph_image,i);
            PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
            PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
            if ((traits == UndefinedPixelTrait) ||
                (morph_traits == UndefinedPixelTrait))
              continue;
            if ((morph_traits & CopyPixelTrait) != 0)
              {
                SetPixelChannel(morph_image,channel,p[i],q);
                continue;
              }
            /* Blend: alpha weight for the current frame, beta for the next. */
            SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
              GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
          }
          p+=GetPixelChannels(morph_image);
          q+=GetPixelChannels(morph_images);
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (n < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PlasmaPixel() perturbs `pixel' by a pseudo-random offset drawn from
  [-noise/2, +noise/2) and clamps the result to the Quantum range.
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  MagickRealType
    value;

  value=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(value));
}
/*
  PlasmaImageProxy() recursively fills the segment with plasma fractal
  values.  While `depth' is nonzero the segment is split into quadrants and
  each is recursed into with increased attenuation; at depth zero the edge
  midpoints and center of the segment are set to the noise-perturbed average
  of the two nearest corner samples (read through u_view/v_view, written
  through image_view).

  Returns MagickTrue when the segment is fully resolved (small enough to
  stop), MagickFalse when further subdivision is still required.
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;

  MagickStatusType
    status;

  const Quantum
    *magick_restrict u,
    *magick_restrict v;

  Quantum
    *magick_restrict q;

  ssize_t
    i;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /* A degenerate (zero-area) segment needs no work. */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
      y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
  y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.  The noise amplitude halves with each
    extra level of attenuation.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
        segment->y1-0.5)),1,1,exception);
      v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
        segment->y2-0.5)),1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=CastDoubleToLong(ceil(segment->x2-0.5));
          u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
            segment->y1-0.5)),1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
            segment->y2-0.5)),1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          /*
            Fix: return MagickTrue on a NULL pixel fetch, consistent with the
            identical checks in the left/bottom/top/middle cases; this one
            alone returned MagickFalse.
          */
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=CastDoubleToLong(ceil(segment->y2-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=CastDoubleToLong(ceil(segment->y1-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: average of the two opposite corners.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      y=CastDoubleToLong(ceil(segment->y1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=CastDoubleToLong(ceil(segment->x2-0.5));
      y=CastDoubleToLong(ceil(segment->y2-0.5));
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /* Segments smaller than 3x3 pixels are fully resolved. */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() initializes `image' with plasma fractal values over the
  given segment.  The image must already hold a base color and the random
  number generator must be seeded before calling.  Returns MagickTrue on
  success.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *pixels_view,
    *source_view,
    *target_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    One authentic view for writing plasma values plus two virtual views for
    reading the corner samples that get averaged.
  */
  pixels_view=AcquireAuthenticCacheView(image,exception);
  source_view=AcquireVirtualCacheView(image,exception);
  target_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,pixels_view,source_view,target_view,
    random_info,segment,attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  target_view=DestroyCacheView(target_view);
  source_view=DestroyCacheView(source_view);
  pixels_view=DestroyCacheView(pixels_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolaroidImage() simulates a Polaroid picture: the image is framed with a
  border (and optional caption), given a wavy bend, a drop shadow, and
  finally rotated by `angle' and trimmed.

  Parameters:
    image     - the source image (not modified).
    draw_info - drawing attributes used to render the caption.
    caption   - caption text (may contain image properties); NULL for none.
    angle     - final rotation angle of the picture.
    method    - pixel interpolation method used by the wave bend.
    exception - receives any errors or warnings.

  Returns a newly allocated Polaroid-style image, or NULL on failure.
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Border width: 1/25 of the larger dimension, at least 10 pixels. */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;

      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      /* Expand any image-property escapes embedded in the caption. */
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];

          DrawInfo
            *annotate_info;

          MagickBooleanType
            status;

          ssize_t
            count;

          TypeMetric
            metrics;

          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* Word-wrap the caption and measure it (count = line count - 1). */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              /* The framed picture grows to make room for the caption. */
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Compose the framed picture: border-colored canvas with the image (and
    caption, if any) composited inside.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  /* Transparent background so the rotations below don't add solid fill. */
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave horizontally, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Add a drop shadow, then composite the picture over its (flopped) shadow.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* picture_image is NULL here (DestroyImage returns NULL), so this
         returns NULL to the caller. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      /* As above: returns NULL via the destroyed pointer. */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Final rotation and trim of excess transparent border.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickSepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"
CacheView
*image_view,
*sepia_view;
Image
*sepia_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
Initialize sepia-toned image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Work on a DirectClass clone so the source image is left untouched.
*/
sepia_image=CloneImage(image,0,0,MagickTrue,exception);
if (sepia_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
{
sepia_image=DestroyImage(sepia_image);
return((Image *) NULL);
}
/*
Tone each row of the image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,sepia_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
intensity,
tone;
/*
Map the pixel intensity onto a sepia palette: red saturates above
the threshold, green above 7/6 of it, and blue is darkened by a
sixth of the threshold.
*/
intensity=GetPixelIntensity(image,p);
tone=intensity > threshold ? (double) QuantumRange : intensity+
(double) QuantumRange-threshold;
SetPixelRed(sepia_image,ClampToQuantum(tone),q);
tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
intensity+(double) QuantumRange-7.0*threshold/6.0;
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
/*
Clamp very dark green/blue up to a floor of threshold/7.
NOTE(review): these reads use `image` with `q`, i.e. the
destination row buffer interpreted through the source image's
channel map -- works because sepia_image is a clone of image,
but confirm before changing either image's channel layout.
*/
tone=threshold/7.0;
if ((double) GetPixelGreen(image,q) < tone)
SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
if ((double) GetPixelBlue(image,q) < tone)
SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(sepia_image);
}
if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
sepia_view=DestroyCacheView(sepia_view);
image_view=DestroyCacheView(image_view);
/*
Stretch and boost contrast so the toned result fills the dynamic range.
*/
(void) NormalizeImage(sepia_image,exception);
(void) ContrastImage(sepia_image,MagickTrue,exception);
if (status == MagickFalse)
sepia_image=DestroyImage(sepia_image);
return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
const double sigma,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"
CacheView
*image_view;
ChannelType
channel_mask;
Image
*border_image,
*clone_image,
*shadow_image;
MagickBooleanType
status;
PixelInfo
background_color;
RectangleInfo
border_info;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Clone the source and surround it with a transparent border wide enough
(2*sigma, rounded) to hold the blurred shadow edge.
*/
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(clone_image,sRGBColorspace,exception);
(void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
exception);
border_info.width=(size_t) floor(2.0*sigma+0.5);
border_info.height=(size_t) floor(2.0*sigma+0.5);
border_info.x=0;
border_info.y=0;
(void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
exception);
clone_image->alpha_trait=BlendPixelTrait;
border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
clone_image=DestroyImage(clone_image);
if (border_image == (Image *) NULL)
return((Image *) NULL);
if (border_image->alpha_trait == UndefinedPixelTrait)
(void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
/*
Shadow image: flatten every pixel to the background color, keeping only
the alpha shape scaled by the requested percentage transparency.
*/
status=MagickTrue;
background_color=border_image->background_color;
background_color.alpha_trait=BlendPixelTrait;
image_view=AcquireAuthenticCacheView(border_image,exception);
for (y=0; y < (ssize_t) border_image->rows; y++)
{
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) border_image->columns; x++)
{
if (border_image->alpha_trait != UndefinedPixelTrait)
background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
SetPixelViaPixelInfo(border_image,&background_color,q);
q+=GetPixelChannels(border_image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
{
border_image=DestroyImage(border_image);
return((Image *) NULL);
}
/*
Blur only the alpha channel to soften the silhouette, then restore the
original channel mask.
*/
channel_mask=SetImageChannelMask(border_image,AlphaChannel);
shadow_image=BlurImage(border_image,0.0,sigma,exception);
border_image=DestroyImage(border_image);
if (shadow_image == (Image *) NULL)
return((Image *) NULL);
(void) SetPixelChannelMask(shadow_image,channel_mask);
/*
Record the shadow's placement in the page geometry, compensating for the
border that was added around the original canvas.
*/
if (shadow_image->page.width == 0)
shadow_image->page.width=shadow_image->columns;
if (shadow_image->page.height == 0)
shadow_image->page.height=shadow_image->rows;
shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
const double sigma,const double angle,ExceptionInfo *exception)
{
CacheView
*random_view;
Image
*blend_image,
*blur_image,
*dodge_image,
*random_image,
*sketch_image;
MagickBooleanType
status;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
/*
Sketch image: start from a double-size canvas filled with per-pixel
uniform noise (same random value written to every channel of a pixel).
*/
random_image=CloneImage(image,image->columns << 1,image->rows << 1,
MagickTrue,exception);
if (random_image == (Image *) NULL)
return((Image *) NULL);
status=MagickTrue;
random_info=AcquireRandomInfoTLS();
random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) random_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) random_image->columns; x++)
{
double
value;
ssize_t
i;
value=GetPseudoRandomValue(random_info[id]);
for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
q[i]=ClampToQuantum(QuantumRange*value);
}
q+=GetPixelChannels(random_image);
}
if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
status=MagickFalse;
}
random_view=DestroyCacheView(random_view);
random_info=DestroyRandomInfoTLS(random_info);
if (status == MagickFalse)
{
random_image=DestroyImage(random_image);
return(random_image);
}
/*
Streak the noise along the sketch angle, extract edges, then invert and
normalize to build the "dodge" layer (pencil-stroke texture).
*/
blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
random_image=DestroyImage(random_image);
if (blur_image == (Image *) NULL)
return((Image *) NULL);
dodge_image=EdgeImage(blur_image,radius,exception);
blur_image=DestroyImage(blur_image);
if (dodge_image == (Image *) NULL)
return((Image *) NULL);
status=ClampImage(dodge_image,exception);
if (status != MagickFalse)
status=NormalizeImage(dodge_image,exception);
if (status != MagickFalse)
status=NegateImage(dodge_image,MagickFalse,exception);
if (status != MagickFalse)
status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
/*
Color-dodge the stroke texture over the original, then blend (20x80) a
copy of the original back in to restore some of its color.
*/
sketch_image=CloneImage(image,0,0,MagickTrue,exception);
if (sketch_image == (Image *) NULL)
{
dodge_image=DestroyImage(dodge_image);
return((Image *) NULL);
}
(void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
MagickTrue,0,0,exception);
dodge_image=DestroyImage(dodge_image);
blend_image=CloneImage(image,0,0,MagickTrue,exception);
if (blend_image == (Image *) NULL)
{
sketch_image=DestroyImage(sketch_image);
return((Image *) NULL);
}
if (blend_image->alpha_trait != BlendPixelTrait)
(void) SetImageAlpha(blend_image,TransparentAlpha,exception);
(void) SetImageArtifact(blend_image,"compose:args","20x80");
(void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
0,0,exception);
blend_image=DestroyImage(blend_image);
return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
/*
Validate the exception structure like every sibling effect method does;
it is dereferenced below by SetImageColorspace() and the cache views.
*/
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
if (image->storage_class == PseudoClass)
{
ssize_t
i;
/*
Solarize colormap: for palette images it suffices to invert any
colormap entry whose channel exceeds the threshold.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((double) image->colormap[i].red > threshold)
image->colormap[i].red=QuantumRange-image->colormap[i].red;
if ((double) image->colormap[i].green > threshold)
image->colormap[i].green=QuantumRange-image->colormap[i].green;
if ((double) image->colormap[i].blue > threshold)
image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
}
return(SyncImage(image,exception));
}
/*
Solarize image: invert, in place, every updatable channel value that
exceeds the threshold (photographic over-exposure effect).
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if ((double) q[i] > threshold)
q[i]=QuantumRange-q[i];
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image. Recover
% the hidden watermark later to prove the authenticity of the image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
| (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"
CacheView
*stegano_view,
*watermark_view;
Image
*stegano_image;
int
c;
MagickBooleanType
status;
PixelInfo
pixel;
Quantum
*q;
ssize_t
x;
size_t
depth,
one;
ssize_t
i,
j,
k,
y;
/*
Initialize steganographic image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(watermark != (const Image *) NULL);
assert(watermark->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
one=1UL;
stegano_image=CloneImage(image,0,0,MagickTrue,exception);
if (stegano_image == (Image *) NULL)
return((Image *) NULL);
stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
{
stegano_image=DestroyImage(stegano_image);
return((Image *) NULL);
}
/*
Hide watermark in low-order bits of image.  Bit i of each watermark
pixel's intensity (most significant first) is written into bit plane j
of the carrier, cycling through the red, green and blue channels (c)
and walking the carrier in raster order starting at image->offset (k).
*/
c=0;
i=0;
j=0;
depth=stegano_image->depth;
k=stegano_image->offset;
status=MagickTrue;
watermark_view=AcquireVirtualCacheView(watermark,exception);
stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
{
for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
{
for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
{
ssize_t
offset;
(void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
exception);
/* Stop if the embedding position has run off the carrier image. */
offset=k/(ssize_t) stegano_image->columns;
if (offset >= (ssize_t) stegano_image->rows)
break;
q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
exception);
if (q == (Quantum *) NULL)
break;
switch (c)
{
case 0:
{
SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 1:
{
SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
case 2:
{
SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
break;
}
}
if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
break;
c++;
if (c == 3)
c=0;
/*
NOTE(review): the wrap point is columns*columns, not columns*rows;
this looks suspect, but the recovery path must use the identical
walk, so confirm against the stegano coder before changing it.
When k wraps back to the starting offset, move to the next bit
plane (j).
*/
k++;
if (k == (ssize_t) (stegano_image->columns*stegano_image->columns))
k=0;
if (k == stegano_image->offset)
j++;
}
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
(depth-i),depth);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
stegano_view=DestroyCacheView(stegano_view);
watermark_view=DestroyCacheView(watermark_view);
if (status == MagickFalse)
stegano_image=DestroyImage(stegano_image);
return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
const Image *right_image,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: a stereo anaglyph with no left-image offset.
  */
  Image
    *anaglyph_image;

  anaglyph_image=StereoAnaglyphImage(left_image,right_image,0,0,exception);
  return(anaglyph_image);
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"
const Image
*image;
Image
*stereo_image;
MagickBooleanType
status;
ssize_t
y;
assert(left_image != (const Image *) NULL);
assert(left_image->signature == MagickCoreSignature);
assert(right_image != (const Image *) NULL);
assert(right_image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
left_image->filename);
image=left_image;
if ((left_image->columns != right_image->columns) ||
(left_image->rows != right_image->rows))
ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
/*
Initialize stereo image attributes.
*/
stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
MagickTrue,exception);
if (stereo_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
{
stereo_image=DestroyImage(stereo_image);
return((Image *) NULL);
}
(void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
/*
Copy left image to red channel and right image to blue channel.
The left image is sampled shifted by (-x_offset,-y_offset) so it can be
displaced relative to the right image.
*/
status=MagickTrue;
for (y=0; y < (ssize_t) stereo_image->rows; y++)
{
const Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
x;
Quantum
*magick_restrict r;
p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
exception);
q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
/*
NOTE(review): a NULL pixel row simply breaks out of the loop without
clearing `status`, so a partially filled anaglyph is still returned;
presumably intentional best-effort behavior -- confirm.
*/
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
(r == (Quantum *) NULL))
break;
for (x=0; x < (ssize_t) stereo_image->columns; x++)
{
SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
GetPixelAlpha(right_image,q))/2,r);
p+=GetPixelChannels(left_image);
q+=GetPixelChannels(right_image);
r+=GetPixelChannels(stereo_image);
}
if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
break;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
stereo_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
if (status == MagickFalse)
stereo_image=DestroyImage(stereo_image);
return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"
CacheView
*canvas_view,
*interpolate_view,
*swirl_view;
double
radius;
Image
*canvas_image,
*swirl_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PointInfo
center,
scale;
ssize_t
y;
/*
Initialize swirl image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
canvas_image=CloneImage(image,0,0,MagickTrue,exception);
if (canvas_image == (Image *) NULL)
return((Image *) NULL);
swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
if (swirl_image == (Image *) NULL)
{
canvas_image=DestroyImage(canvas_image);
return((Image *) NULL);
}
if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
{
canvas_image=DestroyImage(canvas_image);
swirl_image=DestroyImage(swirl_image);
return((Image *) NULL);
}
if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
(void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
/*
Compute scaling factor: scale maps the image's bounding ellipse onto a
circle of the given radius so the swirl is symmetric for non-square
images.
*/
center.x=(double) canvas_image->columns/2.0;
center.y=(double) canvas_image->rows/2.0;
radius=MagickMax(center.x,center.y);
scale.x=1.0;
scale.y=1.0;
if (canvas_image->columns > canvas_image->rows)
scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
else
if (canvas_image->columns < canvas_image->rows)
scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
degrees=(double) DegreesToRadians(degrees);
/*
Swirl image.
*/
status=MagickTrue;
progress=0;
canvas_view=AcquireVirtualCacheView(canvas_image,exception);
interpolate_view=AcquireVirtualCacheView(image,exception);
swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
for (y=0; y < (ssize_t) canvas_image->rows; y++)
{
double
distance;
PointInfo
delta;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
exception);
q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
delta.y=scale.y*(double) (y-center.y);
for (x=0; x < (ssize_t) canvas_image->columns; x++)
{
/*
Determine if the pixel is within an ellipse.
*/
delta.x=scale.x*(double) (x-center.x);
distance=delta.x*delta.x+delta.y*delta.y;
if (distance >= (radius*radius))
{
/*
Outside the swirl circle: copy the pixel unchanged, channel by
channel, honoring both images' channel traits.
*/
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(swirl_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(swirl_image,channel,p[i],q);
}
}
else
{
double
cosine,
factor,
sine;
/*
Swirl the pixel: the rotation angle falls off quadratically from
`degrees` at the center to zero at the radius; sample the source
at the inverse-rotated coordinate with the requested interpolator.
*/
factor=1.0-sqrt((double) distance)/radius;
sine=sin((double) (degrees*factor*factor));
cosine=cos((double) (degrees*factor*factor));
status=InterpolatePixelChannels(canvas_image,interpolate_view,
swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
exception);
if (status == MagickFalse)
break;
}
p+=GetPixelChannels(canvas_image);
q+=GetPixelChannels(swirl_image);
}
if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
status=MagickFalse;
if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
canvas_image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
swirl_view=DestroyCacheView(swirl_view);
interpolate_view=DestroyCacheView(interpolate_view);
canvas_view=DestroyCacheView(canvas_view);
canvas_image=DestroyImage(canvas_image);
if (status == MagickFalse)
swirl_image=DestroyImage(swirl_image);
return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A color value used for tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"
CacheView
*image_view,
*tint_view;
double
intensity;
GeometryInfo
geometry_info;
Image
*tint_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
color_vector;
MagickStatusType
flags;
ssize_t
y;
/*
Allocate tint image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (IsEventLogging() != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
tint_image=CloneImage(image,0,0,MagickTrue,exception);
if (tint_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
{
tint_image=DestroyImage(tint_image);
return((Image *) NULL);
}
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
(IsPixelInfoGray(tint) == MagickFalse))
(void) SetImageColorspace(tint_image,sRGBColorspace,exception);
/* A NULL blend means no tinting: return the plain clone. */
if (blend == (const char *) NULL)
return(tint_image);
/*
Determine RGB values of the color.  `blend` is a geometry string of
per-channel blend percentages (rho,sigma,xi,psi[,chi]); a single value
applies to all channels.
*/
GetPixelInfo(image,&color_vector);
flags=ParseGeometry(blend,&geometry_info);
color_vector.red=geometry_info.rho;
color_vector.green=geometry_info.rho;
color_vector.blue=geometry_info.rho;
color_vector.alpha=(MagickRealType) OpaqueAlpha;
if ((flags & SigmaValue) != 0)
color_vector.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
color_vector.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
color_vector.alpha=geometry_info.psi;
if (image->colorspace == CMYKColorspace)
{
color_vector.black=geometry_info.rho;
if ((flags & PsiValue) != 0)
color_vector.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
color_vector.alpha=geometry_info.chi;
}
/*
Convert the blend percentages of the tint color into signed channel
deltas, centered by subtracting the tint's intensity.
*/
intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
/*
Tint image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,tint_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
PixelInfo
pixel;
double
weight;
/*
Weight each channel's delta by f(x)=1-4*(x-0.5)^2, which is zero
at black and white and maximal at the midtones.
*/
GetPixelInfo(image,&pixel);
weight=QuantumScale*GetPixelRed(image,p)-0.5;
pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelGreen(image,p)-0.5;
pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlue(image,p)-0.5;
pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
(1.0-(4.0*(weight*weight)));
weight=QuantumScale*GetPixelBlack(image,p)-0.5;
pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
(1.0-(4.0*(weight*weight)));
pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
SetPixelViaPixelInfo(tint_image,&pixel,q);
p+=GetPixelChannels(image);
q+=GetPixelChannels(tint_image);
}
if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
tint_view=DestroyCacheView(tint_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
tint_image=DestroyImage(tint_image);
return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Work on a blended clone of the input; the source image is not modified.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse, centered on the image and shrunk by the (x,y)
    offsets, on a black background; this becomes the vignette mask.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Soften the mask edge with a Gaussian blur, then composite it onto the
    canvas with IntensityCompositeOp.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  /*
    Flatten against the background to produce the final vignette, restoring
    the caller's colorspace on success.
  */
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o interpolate: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  float
    *sine_map;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /*
    The result is taller than the source by twice the wave amplitude so the
    vertically displaced rows stay inside the canvas.
  */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Per-column vertical offset, biased by |amplitude| so it is non-negative.
  */
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length)));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /*
        Sample the source at (x, y-sine_map[x]); the interpolator reads
        through the virtual cache view (p is only advanced here, its pixels
        are not read directly).
      */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HatTransform() applies one pass of the a-trous "hat" (triangle) filter along
  a row or column: kernel[i] = (2*p[i] + p[i-scale] + p[i+scale]) / 4, where
  indices that fall outside [0, extent) are reflected back into range.
  `stride` selects the walking direction (1 for rows, row width for columns).
  Assumes extent > scale, as the wavelet levels guarantee.
*/
static inline void HatTransform(const float *pixels,const size_t stride,
  const size_t extent,const size_t scale,float *kernel)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) extent; i++)
  {
    ssize_t
      lo,
      hi;

    lo=i-(ssize_t) scale;
    hi=i+(ssize_t) scale;
    if (lo < 0)
      lo=(-lo);  /* mirror across the left edge */
    if (hi >= (ssize_t) extent)
      hi=2*((ssize_t) extent-1)-hi;  /* mirror across the right edge */
    kernel[i]=0.25f*(2.0f*pixels[i*stride]+pixels[lo*stride]+
      pixels[hi*stride]);
  }
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  ssize_t
    channel;

  /*
    Empirical per-level noise magnitudes used to scale the threshold.
  */
  static const float
    noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
      0.0080f, 0.0044f };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsEventLogging() != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it succeeds.
  */
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    pixels holds three channel-sized planes: plane 0 is the working/output
    plane; the other two alternate as the low-pass target per level.  kernel
    holds one scratch row/column per OpenMP thread.
  */
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
  {
    ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    PixelChannel
      pixel_channel;

    PixelTrait
      traits;

    if (status == MagickFalse)
      continue;
    traits=GetPixelChannelTraits(image,(PixelChannel) channel);
    if (traits == UndefinedPixelTrait)
      continue;
    pixel_channel=GetPixelChannelChannel(image,channel);
    /*
      Only the color channels are denoised.
    */
    if ((pixel_channel != RedPixelChannel) &&
        (pixel_channel != GreenPixelChannel) &&
        (pixel_channel != BluePixelChannel))
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[i++]=(float) p[channel];
        p+=GetPixelChannels(image);
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x;

      /*
        Alternate the two scratch planes as this level's low-pass target.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        float
          *magick_restrict p,
          *magick_restrict q;

        ssize_t
          c;

        /*
          Horizontal hat transform of row y into this thread's kernel slice,
          then copy the result to the low-pass plane.
        */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,((size_t) 1UL << level),p);
        q+=low_pass;
        for (c=0; c < (ssize_t) image->columns; c++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_number_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        float
          *magick_restrict p,
          *magick_restrict q;

        ssize_t
          r;

        /*
          Vertical hat transform of column x, applied in place on the
          low-pass plane (stride is the row width).
        */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,((size_t) 1UL << level),p);
        for (r=0; r < (ssize_t) image->rows; r++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        /*
          Detail coefficient = previous band minus this level's low-pass
          band; soft-threshold it, then accumulate it into plane 0 (skipped
          at level 0, where high_pass is plane 0 itself).
        */
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        offset;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          break;
        }
      offset=GetPixelChannelOffset(noise_image,pixel_channel);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        MagickRealType
          pixel;

        /*
          Output = accumulated detail bands plus the final residual
          low-pass band.
        */
        pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
        q[offset]=ClampToQuantum(pixel);
        i++;
        q+=GetPixelChannels(noise_image);
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is reported with AddNoiseImageTag — looks
          like a copy/paste from AddNoiseImage(); confirm the intended tag.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
|
omp_task.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
#include "omp_my_sleep.h"
/* Checks that OpenMP tasks are distributed across threads: a single thread
 * spawns NUM_TASKS sleeping tasks and records which thread executed each.
 * Returns 1 (pass) if at least two distinct thread ids were recorded,
 * 0 (fail) if every task ran on the same thread. */
int test_omp_task()
{
  int tids[NUM_TASKS];
  int i;
#pragma omp parallel
  {
#pragma omp single
    {
      for (i = 0; i < NUM_TASKS; i++) {
        /* First we have to store the value of the loop index in a new variable
         * which will be private for each task because otherwise it will be
         * overwritten if the execution of the task takes longer than the time
         * which is needed to enter the next step of the loop!
         */
        int myi;
        myi = i;
#pragma omp task
        {
          my_sleep (SLEEPTIME);
          tids[myi] = omp_get_thread_num();
        } /* end of omp task */
      } /* end of for */
    } /* end of single */
  } /* end of parallel */
  /* Now we check if more than one thread executed the tasks. */
  for (i = 1; i < NUM_TASKS; i++) {
    if (tids[0] != tids[i])
      return 1;
  }
  return 0;
} /* end of test_omp_task */
/* Run the task test REPETITIONS times; the exit status is the number of
 * repetitions in which the test reported failure (0 means all passed). */
int main()
{
  int failures = 0;
  for (int rep = 0; rep < REPETITIONS; rep++) {
    if (!test_omp_task()) {
      failures++;
    }
  }
  return failures;
}
|
quick_sort.h | // Copyright 2021 Bessolitsyn Sergey
#ifndef MODULES_TASK_2_BESSOLITSYN_S_QUICK_SORT_QUICK_SORT_H_
#define MODULES_TASK_2_BESSOLITSYN_S_QUICK_SORT_QUICK_SORT_H_
#include <omp.h>
#include <vector>
#include <random>
#include <utility>
// Recursive in-place quicksort of arr[0 .. right-1]: a randomly chosen pivot
// is moved to the front, the range is partitioned Lomuto-style around it,
// and both halves are sorted recursively.
template<typename T>
void quick_sort(T arr[], int right) {
    if (right <= 0)
        return;
    int pivot_pos = std::rand() % right;  // Change to std::mt19937?
    std::swap(arr[0], arr[pivot_pos]);
    int boundary = 0;  // last index of the "less than pivot" region
    for (int i = 1; i < right; ++i) {
        if (arr[i] < arr[0])
            std::swap(arr[i], arr[++boundary]);
    }
    std::swap(arr[0], arr[boundary]);  // drop the pivot into its final slot
    quick_sort(arr, boundary);
    quick_sort(arr + boundary + 1, right - boundary - 1);
}
// Merge two sorted runs arr1[0..size1) and arr2[0..size2) into ascending
// order, writing the size1+size2 merged elements back through arr1.
// NOTE: the write-back covers size1+size2 slots starting at arr1, so the
// runs are expected to be adjacent (arr2 == arr1 + size1), as in
// quick_sort_OMP.
template<typename T>
void merge(T arr1[], int size1, T arr2[], int size2) {
    T* tmp_arr = new T[size1 + size2];
    int i = 0, j = 0, k = 0;
    // Standard two-finger merge into the temporary buffer.
    for (; i < size1 && j < size2; ++k) {
        if (arr1[i] < arr2[j])
            tmp_arr[k] = arr1[i++];
        else
            tmp_arr[k] = arr2[j++];
    }
    while (i < size1) {
        tmp_arr[k++] = arr1[i++];
    }
    while (j < size2) {
        tmp_arr[k++] = arr2[j++];
    }
    for (i = 0; i < k; ++i) {
        arr1[i] = tmp_arr[i];
    }
    delete[] tmp_arr;  // fix: buffer was previously leaked on every call
}
// Parallel quicksort: each OpenMP thread sorts an equal-sized slice of arr,
// then the master thread merges the sorted slices pairwise from right to
// left.  NOTE(review): assumes size >= number of threads (otherwise the
// per-thread slice length delta is 0) — confirm with callers.
template<typename T>
void quick_sort_OMP(T arr[], int size) {
#pragma omp parallel
    {
        int num_th = omp_get_num_threads();
        int delta = size / num_th;  // base slice length per thread
        int rem = size % num_th;    // remainder, handled by the last thread
        int th_id = omp_get_thread_num();
        if (th_id != num_th - 1)
            quick_sort(arr + th_id * delta, delta);
        else
            quick_sort(arr + (num_th - 1) * delta, delta + rem);
        // All slices must be sorted before the sequential merge begins.
#pragma omp barrier
#pragma omp master
        {
            // Fold the slices together: start with the rightmost pair and
            // extend the merged run leftwards one slice at a time.
            int last_size = delta + rem;
            T* arr2 = arr + (num_th - 1) * delta;
            T* arr1 = arr2 - delta;
            for (int j = num_th - 1; j > 0; --j) {
                merge(arr1, delta, arr2, last_size);
                arr1 -= delta;
                arr2 -= delta;
                last_size += delta;
            }
        }
    }
}
std::vector<int> getRandomVector(int size, uint64_t seed = 50);
std::vector<double> getRandomDoubleVector(int size, uint64_t seed = 50);
#endif // MODULES_TASK_2_BESSOLITSYN_S_QUICK_SORT_QUICK_SORT_H_
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/timer-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCacheIndexes(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
  Release every OpenCL event and the pixel buffer tracked by `info`, destroy
  its semaphore, and free the structure itself.  Returns the result of
  RelinquishMagickMemory() (NULL by MagickCore convention) so callers can
  write `info=RelinquishOpenCLCacheInfo(clEnv,info);`.
*/
static inline OpenCLCacheInfo *RelinquishOpenCLCacheInfo(MagickCLEnv clEnv,
  OpenCLCacheInfo *info)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) info->event_count; i++)
    clEnv->library->clReleaseEvent(info->events[i]);
  info->events=(cl_event *) RelinquishMagickMemory(info->events);
  DestroySemaphoreInfo(&info->events_semaphore);
  if (info->buffer != (cl_mem) NULL)
    {
      clEnv->library->clReleaseMemObject(info->buffer);
      info->buffer=(cl_mem) NULL;
    }
  return((OpenCLCacheInfo *) RelinquishMagickMemory(info));
}
/*
  OpenCL completion callback: frees the cached pixels once every tracked
  OpenCL event has completed.  If any event is still pending, re-registers
  itself as that event's CL_COMPLETE callback and returns without freeing.
  Also called directly (with a NULL event) to start the teardown.
*/
static void CL_API_CALL RelinquishPixelCachePixelsDelayed(
  cl_event magick_unused(event),cl_int magick_unused(event_command_exec_status),
  void *user_data)
{
  MagickCLEnv
    clEnv;

  OpenCLCacheInfo
    *info;

  PixelPacket
    *pixels;

  ssize_t
    i;

  magick_unreferenced(event);
  magick_unreferenced(event_command_exec_status);
  info=(OpenCLCacheInfo *) user_data;
  clEnv=GetDefaultOpenCLEnv();
  /*
    Scan newest-to-oldest; a status above CL_COMPLETE means still running.
  */
  for (i=(ssize_t)info->event_count-1; i >= 0; i--)
  {
    cl_int
      event_status;

    cl_uint
      status;

    status=clEnv->library->clGetEventInfo(info->events[i],
      CL_EVENT_COMMAND_EXECUTION_STATUS,sizeof(cl_int),&event_status,NULL);
    if ((status == CL_SUCCESS) && (event_status > CL_COMPLETE))
      {
        clEnv->library->clSetEventCallback(info->events[i],CL_COMPLETE,
          &RelinquishPixelCachePixelsDelayed,info);
        return;
      }
  }
  /*
    All events complete: release the resource accounting, the OpenCL
    bookkeeping, and finally the pixel memory itself.
  */
  pixels=info->pixels;
  RelinquishMagickResource(MemoryResource,info->length);
  (void) RelinquishOpenCLCacheInfo(clEnv,info);
  (void) RelinquishAlignedMemory(pixels);
}
/*
  RelinquishOpenCLBuffer() triggers release of the OpenCL pixel state attached
  to the given cache via the delayed-free callback (which defers until all
  pending events complete).  Returns MagickTrue when OpenCL state was present,
  MagickFalse when there was nothing to release.

  Fix: removed the local `clEnv` variable, which was declared but never used.
*/
static MagickBooleanType RelinquishOpenCLBuffer(
  CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return(MagickFalse);
  RelinquishPixelCachePixelsDelayed((cl_event) NULL,0,cache_info->opencl);
  return(MagickTrue);
}
/*
  Return a snapshot copy of the cache's outstanding OpenCL events, taken
  under the events semaphore.  On success *event_count holds the number of
  copied events; when no events are pending, or on allocation failure,
  returns NULL with *event_count set to 0.  The caller owns the returned
  array.
*/
static cl_event *CopyOpenCLEvents(OpenCLCacheInfo *opencl_info,
  cl_uint *event_count)
{
  cl_event
    *events;

  register size_t
    i;

  assert(opencl_info != (OpenCLCacheInfo *) NULL);
  events=(cl_event *) NULL;
  LockSemaphoreInfo(opencl_info->events_semaphore);
  *event_count=opencl_info->event_count;
  if (*event_count > 0)
    {
      events=AcquireQuantumMemory(*event_count,sizeof(*events));
      if (events == (cl_event *) NULL)
        *event_count=0;
      else
        {
          for (i=0; i < opencl_info->event_count; i++)
            events[i]=opencl_info->events[i];
        }
    }
  UnlockSemaphoreInfo(opencl_info->events_semaphore);
  return(events);
}
#endif
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A d d O p e n C L E v e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddOpenCLEvent() adds an event to the list of operations the next operation
% should wait for.
%
% The format of the AddOpenCLEvent() method is:
%
% void AddOpenCLEvent(const Image *image,cl_event event)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event: the event that should be added.
%
*/
extern MagickPrivate void AddOpenCLEvent(const Image *image,cl_event event)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  assert(event != (cl_event) NULL);
  cache_info=(CacheInfo *)image->cache;
  assert(cache_info->opencl != (OpenCLCacheInfo *) NULL);
  clEnv=GetDefaultOpenCLEnv();
  /*
    Retain the event so it outlives the caller; if retaining fails, fall
    back to waiting on it synchronously instead of tracking it.
  */
  if (clEnv->library->clRetainEvent(event) != CL_SUCCESS)
    {
      clEnv->library->clWaitForEvents(1,&event);
      return;
    }
  /*
    Append the event to the cache's event list under its semaphore.
  */
  LockSemaphoreInfo(cache_info->opencl->events_semaphore);
  if (cache_info->opencl->events == (cl_event *) NULL)
    {
      cache_info->opencl->events=AcquireMagickMemory(sizeof(
        *cache_info->opencl->events));
      cache_info->opencl->event_count=1;
    }
  else
    cache_info->opencl->events=ResizeQuantumMemory(cache_info->opencl->events,
      ++cache_info->opencl->event_count,sizeof(*cache_info->opencl->events));
  if (cache_info->opencl->events == (cl_event *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  cache_info->opencl->events[cache_info->opencl->event_count-1]=event;
  UnlockSemaphoreInfo(cache_info->opencl->events_semaphore);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->channels=4;
  cache_info->file=(-1);  /* no disk cache file open yet */
  cache_info->id=GetMagickThreadId();
  /*
    Size the nexus pool for the worst case: the caller's request, the OpenMP
    maximum, or the thread resource limit, whichever is largest (at least 1).
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  /*
    Synchronize-to-disk behavior: the policy value, checked second,
    overrides the environment variable when both are set.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AllocateSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AllocateSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads nexuses in one contiguous slab: the first half
    are the per-thread nexuses; each entry in the second half backs one of
    the first half as its virtual nexus.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a read-only pointer to the image's in-core pixels and store their
  length in *length; returns NULL (with *length 0) when the pixels are not
  resident in memory (i.e. neither a memory nor a memory-mapped cache).
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) exception;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    {
      *length=info->length;
      return((const void *) info->pixels);
    }
  *length=0;
  return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  Instantiate the cache component: ensure the global cache semaphore exists.
  Always reports success.
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AllocateSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickExport void CacheComponentTerminus(void)
{
  /*
    The semaphore may never have been created (genesis not called); activate
    it so DestroySemaphoreInfo() has a valid object to tear down.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClipPixelCacheNexus() composites the nexus pixels against the image clip
  mask: where the mask intensity is non-negligible, the authentic source
  pixels (p) are blended over the nexus pixels (q) with MagickOver_; where it
  is ~0, the nexus pixel is left untouched.  Returns MagickTrue when the whole
  region was processed, MagickFalse on a cache failure or a NULL pixel walk.
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict clip_nexus;

  register const PixelPacket
    *magick_restrict r;           /* clip-mask pixels */

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,           /* authentic source pixels */
    *magick_restrict q;           /* destination nexus pixels */

  register ssize_t
    i;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Nothing to clip: no mask, or colormapped image. */
  if ((image->clip_mask == (Image *) NULL) ||
      (image->storage_class == PseudoClass))
    return(MagickTrue);
  /* Empty region: trivially clipped. */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* Temporary nexus through which the clip-mask pixels are fetched. */
  clip_nexus=AcquirePixelCacheNexus(1);
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  r=GetVirtualPixelCacheNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    double
      mask_alpha;

    /* Either fetch above may have failed; bail out of the walk. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    mask_alpha=QuantumScale*GetPixelIntensity(image,r);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /* Blend the authentic pixel over the nexus pixel, scaled by the
           mask alpha. */
        SetPixelRed(q,mask_alpha*MagickOver_((MagickRealType) p->red,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->red,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelGreen(q,mask_alpha*MagickOver_((MagickRealType) p->green,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->green,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelBlue(q,mask_alpha*MagickOver_((MagickRealType) p->blue,
          (MagickRealType) GetPixelOpacity(p),(MagickRealType) q->blue,
          (MagickRealType) GetPixelOpacity(q)));
        SetPixelOpacity(q,GetPixelOpacity(p));
        if (cache_info->active_index_channel != MagickFalse)
          SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
      }
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  /* i stops short of number_pixels only when a pixel walk was NULL. */
  return(i < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  ClonePixelCache() clones a pixel cache: acquire a fresh cache with the same
  thread count and copy over the virtual pixel method.  Pixel data itself is
  not copied here.
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
/*
  ClonePixelCacheMethods() copies the pixel cache method table from `cache`
  into `clone`.  Both arguments are validated before the assignment.
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ClonePixelCacheOnDisk() clones one disk-backed pixel cache to another with
  identical morphology by copying the raw backing file.  Returns MagickTrue
  only when exactly cache_info->length bytes were transferred.
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* Rewind both files before the byte copy. */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /* Copy in chunks of up to MagickMaxBufferExtent; shrink the chunk size to
     the source file size when that is smaller. */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: disk full or I/O error */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* Anything other than a complete copy fails the clone. */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  ClonePixelCacheRepository() copies the pixel (and, when active, index)
  repository from cache_info into clone_info.  Three strategies, fastest
  first: a straight memcpy when both caches are in memory with identical
  morphology, a raw file copy when both are on disk with identical
  morphology, otherwise a row-by-row transfer through per-thread nexuses
  (handles differing columns/rows by clamping to the smaller extent).
*/
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
/* Pick the OpenMP thread count: single-threaded when either cache is not
   memory resident, otherwise scale with the number of rows. */
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* A ping cache has no pixels to clone. */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) ||
           (clone_info->type == MapCache)))
        {
          /* Both caches are memory resident: bulk copy. */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->columns*cache_info->rows*sizeof(*cache_info->pixels));
          if ((cache_info->active_index_channel != MagickFalse) &&
              (clone_info->active_index_channel != MagickFalse))
            (void) memcpy(clone_info->indexes,cache_info->indexes,
              cache_info->columns*cache_info->rows*
              sizeof(*cache_info->indexes));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  /* Copy only as many columns as both caches share. */
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    if (status == MagickFalse)
      continue;
    /* Rows beyond the clone's extent are dropped. */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    /* Zero the destination row first: the source row may be narrower. */
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  DestroyImagePixelCache() releases the pixel cache attached to an image,
  clearing the image's cache pointer.  A NULL cache is a no-op.
*/
static void DestroyImagePixelCache(Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  DestroyImagePixels() deallocates the image's pixel cache, delegating to an
  installed destroy-pixel handler when one is registered; otherwise the cache
  is destroyed directly.
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  ClosePixelCacheOnDisk() closes the cache's backing file descriptor and
  returns the file resource.  Returns MagickFalse when there was no open file
  or close(2) failed.
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
/*
  RelinquishPixelCachePixels() releases the pixel store owned by a cache,
  dispatching on the cache type (heap memory, memory-mapped file, disk file,
  or distributed server), then resets the cache to UndefinedCache.
*/
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* If an OpenCL buffer still wraps the pixels, let the OpenCL layer
         release them instead. */
      if (RelinquishOpenCLBuffer(cache_info) != MagickFalse)
        {
          cache_info->pixels=(PixelPacket *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(PixelPacket *) NULL;
      /* Keep the backing file for read-only and persistent caches. */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: no break above, so a MapCache also executes the DiskCache
       case, closing its open file descriptor and returning the disk
       resource — presumably intentional; confirm against upstream. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
/*
  DestroyPixelCache() drops one reference to the cache.  Only when the count
  reaches zero are the pixels, distributed-cache connection, nexus table,
  random state, and semaphores torn down and the structure freed.  Returns
  NULL in every case.
*/
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /* Decrement the reference count under the cache semaphore. */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* Other references remain; nothing else to do. */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Last reference: release the pixel store and all auxiliary state. */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  /* Poison the signature to catch use-after-free. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
/*
  Release a nexus's staging pixel store (aligned heap memory or a mapped
  blob, depending on how it was acquired) and reset all of its pointers and
  bookkeeping fields.
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
/*
  DestroyPixelCacheNexus() destroys a pixel cache nexus table: release each
  entry's pixel store, poison its signature, then free the entry array and
  the pointer table.  Returns NULL.
*/
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    n;

  assert(nexus_info != (NexusInfo **) NULL);
  /* The table spans 2*number_threads slots — presumably one spare nexus per
     thread; matches the allocation in AcquirePixelCacheNexus (verify). */
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  /* Entries live in one contiguous allocation anchored at slot 0. */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetAuthenticIndexQueue() returns the authentic black channel or colormap
  indexes associated with the last QueueAuthenticPixels()/GetVirtualPixels()
  call.  Delegates to an installed handler when one is registered; otherwise
  reads the calling thread's nexus directly.
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_indexes_from_handler ==
      (GetAuthenticIndexesFromHandler) NULL)
    {
      assert(thread_id < (int) info->number_threads);
      return(info->nexus_info[thread_id]->indexes);
    }
  return(info->methods.get_authentic_indexes_from_handler(image));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetAuthenticOpenCLBuffer() returns an OpenCL buffer wrapping the image's
  in-memory pixel cache (CL_MEM_USE_HOST_PTR), creating it on first use.
  Returns NULL when the cache is not a plain (non-mapped) memory cache or the
  buffer cannot be created.
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  cl_context
    context;

  cl_int
    status;

  MagickCLEnv
    clEnv;

  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *)image->cache;
  /* Make sure this image holds the sole, materialized copy of the cache;
     SyncImagePixelCache may replace image->cache, so re-read it. */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *)image->cache;
    }
  /* Only heap-resident memory caches can back an OpenCL host-pointer buffer. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  clEnv=GetDefaultOpenCLEnv();
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    {
      /* First request: create the OpenCL wrapper around the pixel store. */
      assert(cache_info->pixels != NULL);
      context=GetOpenCLContext(clEnv);
      cache_info->opencl=(OpenCLCacheInfo *) AcquireCriticalMemory(
        sizeof(*cache_info->opencl));
      (void) memset(cache_info->opencl,0,sizeof(*cache_info->opencl));
      cache_info->opencl->events_semaphore=AllocateSemaphoreInfo();
      cache_info->opencl->length=cache_info->length;
      cache_info->opencl->pixels=cache_info->pixels;
      cache_info->opencl->buffer=clEnv->library->clCreateBuffer(context,
        CL_MEM_USE_HOST_PTR,cache_info->length,cache_info->pixels,&status);
      if (status != CL_SUCCESS)
        cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  /* Hand the caller its own retained reference to the buffer. */
  if (cache_info->opencl != (OpenCLCacheInfo *) NULL)
    clEnv->library->clRetainMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_mem) NULL);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAuthenticPixelCacheNexus() fetches authentic pixels for the requested
  region into the given nexus.  The nexus is first staged via
  QueueAuthenticPixelCacheNexus(); unless it already aliases the cache
  directly, the pixels (and indexes, when active) are read in from the cache.
  Returns NULL on any failure.
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* The nexus maps the cache in place: no transfer needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if ((cache_info->active_index_channel != MagickFalse) &&
      (ReadPixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return((PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetAuthenticPixelQueue() returns the authentic pixels associated with the
  last QueueAuthenticPixels()/GetAuthenticPixels() call.  Delegates to an
  installed handler when one is registered; otherwise reads the calling
  thread's nexus directly.
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(thread_id < (int) info->number_threads);
      return(info->nexus_info[thread_id]->pixels);
    }
  return(info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseduoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAuthenticPixels() obtains a pixel region for read/write access.
  Delegates to an installed handler when one is registered; otherwise the
  region is fetched through the calling thread's cache nexus.  Returns NULL
  on failure.
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(thread_id < (int) info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        info->nexus_info[thread_id],exception));
    }
  return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default handler: fetch the requested region through the calling thread's
  cache nexus.  Returns NULL when the image has no cache or the fetch fails.
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  GetImageExtent() returns the extent (in bytes) of the pixels staged in the
  calling thread's cache nexus by the last authentic pixel request.
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O p e n C L E v e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOpenCLEvents() returns the events that the next operation should wait
% for. The argument event_count is set to the number of events.
%
% The format of the GetOpenCLEvents() method is:
%
%      const cl_event *GetOpenCLEvents(const Image *image,
%        cl_uint *event_count)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o event_count: will be set to the number of events.
%
*/
extern MagickPrivate cl_event *GetOpenCLEvents(const Image *image,
  cl_uint *event_count)
{
  /*
    Return the OpenCL events the next operation must wait on, or NULL when
    none are pending; *event_count receives the number of events returned.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(event_count != (cl_uint *) NULL);
  *event_count=0;
  info=(CacheInfo *) image->cache;
  if (info->opencl == (OpenCLCacheInfo *) NULL)
    return((cl_event *) NULL);
  return(CopyOpenCLEvents(info->opencl,event_count));
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  /*
    Return MagickTrue when the image's storage class, colorspace, channel
    count, and geometry still agree with its pixel cache (and the cache has
    a nexus table); MagickFalse signals the cache must be reopened.
  */
  const CacheInfo
    *magick_restrict info = (const CacheInfo *) image->cache;

  if (info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  if ((image->storage_class == info->storage_class) &&
      (image->colorspace == info->colorspace) &&
      (image->channels == info->channels) &&
      (image->columns == info->columns) &&
      (image->rows == info->rows))
    return(MagickTrue);
  return(MagickFalse);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide limits, read lazily on first use, plus a call counter that
    paces CPU throttling.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Yield the CPU on every 32nd call when a throttle limit is set. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_epoch=GetMagickTime();
      cache_timelimit=GetMagickResourceLimit(TimeResource);
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        Time limit exceeded: release any disk-cache file descriptor and
        abort with a fatal exception (does not return).
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: when the cache is shared (reference count > 1) or
    read-only, clone it so this image owns a private writable copy.  The
    condition is re-checked under the cache semaphore to close the race
    with other threads doing the same.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A stack copy of the image (with its own
            semaphore) stands in as the owner while the clone is opened.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy the pixel data too when the caller asked for a clone. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: point the image at its private clone. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the old shared cache outside the cache lock. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Backward-compatible alias for GetImagePixelCacheType(). */
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  /*
    Report how this image's pixels are backed (e.g. memory, memory-mapped
    file, or disk).
  */
  const CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Return a single authentic (writable) pixel at (x,y).  On failure
    MagickFalse is returned and *pixel holds the image background color.
    Delegates to an installed handler when one is registered.
  */
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *magick_restrict pixels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  /* Out-parameter must be writable (consistent with the file's other
     out-parameter assertions, e.g. GetPixelCachePixels). */
  assert(pixel != (PixelPacket *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (cache_info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,
      pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (pixels == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*pixels);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
%        const ssize_t x,const ssize_t y,PixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one authentic pixel at (x,y) through the calling
    thread's cache nexus; the background color is the fallback result.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  PixelPacket
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  assert(thread_id < (int) info->number_threads);
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
%      MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
%        const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  /*
    Read one virtual pixel at (x,y) into a generic MagickPixelPacket,
    including any colormap/black-channel index data.  *pixel is initialized
    even when the read fails (MagickFalse).
  */
  CacheInfo
    *magick_restrict info;

  const IndexPacket
    *magick_restrict index_table;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,info->nexus_info[thread_id],exception);
  GetMagickPixelPacket(image,pixel);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  index_table=GetVirtualIndexesFromNexus(info,info->nexus_info[thread_id]);
  SetMagickPixelPacket(image,p,index_table,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
%      MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Read one virtual pixel at (x,y) using the caller-specified virtual
    pixel method; the background color is the fallback result.  Delegates
    to an installed handler when one is registered.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Read one virtual pixel at (x,y) using the image's configured virtual
    pixel method; the background color is the fallback result.  Delegates
    to an installed handler when one is registered.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one virtual pixel at (x,y) through the calling
    thread's cache nexus; the background color is the fallback result.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const PixelPacket
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  *pixel=image->background_color;
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
% A description of each parameter follows:
%
%    o channels: GetPixelCacheChannels() returns the number of pixel channels.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  /*
    Report the number of pixel channels stored per pixel in this cache.
  */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  /*
    Report the colorspace the cached pixels are stored in.
  */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate a CacheMethods structure with the default pixel-cache
    handlers; callers may then override individual entries.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* Virtual (read-only) pixel access. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* Authentic (read/write) pixel access. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /* Lifecycle. */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  /*
    Extent (in pixels) of the region currently selected on the nexus; when
    no region has been selected, fall back to the full cache extent.
  */
  const CacheInfo
    *magick_restrict info;

  MagickSizeType
    extent;

  assert(cache != NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) info->columns*info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  /*
    Expose the raw pixel buffer when the cache is memory- or map-backed
    (NULL otherwise); *length always receives the cache length in bytes.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) exception;
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  /*
    Report whether the cache stores DirectClass or PseudoClass pixels.
  */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  /*
    Suggest a square tile size for cache-friendly traversal: 2KB worth of
    pixels per row for in-memory caches, 8KB for disk-backed caches.
  */
  size_t
    tile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  tile=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    tile=8192UL/sizeof(PixelPacket);
  *width=tile;
  *height=tile;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  /*
    Report which "virtual pixel" policy is applied to reads that fall
    outside the image's pixel boundaries.
  */
  const CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
%      const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  /*
    Return the colormap indexes attached to the given cache nexus, or NULL
    when the cache has no storage class assigned yet.
  */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  return(info->storage_class == UndefinedClass ? (IndexPacket *) NULL :
    nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table shared by DitherX() and DitherY().
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  /*
    Apply the ordered-dither offset to column x, clamped to [0,columns-1].
  */
  const ssize_t
    offset = x+DitherMatrix[x & 0x07]-32L;

  if (offset < 0L)
    return(0L);
  return(offset < (ssize_t) columns ? offset : (ssize_t) columns-1L);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  /*
    Apply the ordered-dither offset to row y, clamped to [0,rows-1].
  */
  const ssize_t
    offset = y+DitherMatrix[y & 0x07]-32L;

  if (offset < 0L)
    return(0L);
  return(offset < (ssize_t) rows ? offset : (ssize_t) rows-1L);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /* Clamp x into [0, columns-1] (edge-replication addressing). */
  if (x < 0L)
    return(0L);
  return(x >= (ssize_t) columns ? (ssize_t) (columns-1) : x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /* Clamp y into [0, rows-1] (edge-replication addressing). */
  if (y < 0L)
    return(0L);
  return(y >= (ssize_t) rows ? (ssize_t) (rows-1) : y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  /* Pick a uniformly random column index in [0, columns). */
  const double
    value = GetPseudoRandomValue(random_info);

  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  /* Pick a uniformly random row index in [0, rows). */
  const double
    value = GetPseudoRandomValue(random_info);

  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
const size_t extent)
{
MagickModulo
modulo;
modulo.quotient=offset/((ssize_t) extent);
modulo.remainder=offset % ((ssize_t) extent);
if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
{
modulo.quotient-=1;
modulo.remainder+=((ssize_t) extent);
}
return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  IndexPacket
    virtual_index;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  PixelPacket
    *magick_restrict pixels,
    virtual_pixel;

  register const IndexPacket
    *magick_restrict virtual_indexes;

  register const PixelPacket
    *magick_restrict p;

  register IndexPacket
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    u,
    v;

  /*
    Acquire pixels.  Any request fully inside the cache extents is served
    directly from the cache; out-of-bounds coordinates are synthesized per
    the requested virtual-pixel method (constant fill, edge replication,
    tiling, mirroring, dithering, or random sampling).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage a pixel buffer for the requested region; clip/mask images force a
    private (non-in-place) buffer.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /*
    Fast path: the request lies entirely within the cache extents, so read
    the pixels (and indexes, for PseudoClass/CMYK) straight from the cache.
  */
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  q=pixels;
  indexes=nexus_info->indexes;
  /*
    Pre-compute the constant fill pixel used by the constant-color virtual
    pixel methods; the default is the image background color.
  */
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=(IndexPacket) 0;
  /*
    Walk the requested region row by row; within each row, in-bounds spans
    are copied in bulk while out-of-bounds pixels are synthesized one at a
    time (mostly via recursive 1x1 requests into virtual_nexus).
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      /* Length of the run that remains inside the cache columns. */
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant fill: reuse the pre-computed virtual pixel. */
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              /* Replicate the nearest edge pixel. */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Sample a random in-bounds pixel (lazy RNG init). */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              /* Ordered-dither perturbation of the clamped coordinate. */
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              /* Wrap both coordinates (floored modulo) to tile the image. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Tile, but reflect on every other repetition (odd quotient). */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              /* Tile, but fill alternate (checkerboard) tiles with the
                 constant virtual pixel. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              /* Tile horizontally only; vertical overflow gets the fill. */
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              /* Tile vertically only; horizontal overflow gets the fill. */
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              /* Tile horizontally, edge-replicate vertically. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              /* Tile vertically, edge-replicate horizontally. */
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
    /* An early break above signals a failed transfer: abort the request. */
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (v < (ssize_t) rows)
    return((const PixelPacket *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Resolve this thread's cache nexus and delegate the virtual pixel
    request to the nexus-based reader.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict info;

  GetVirtualPixelHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Read an immutable pixel region using the image's current virtual pixel
    method, preferring an installed cache-method handler over this
    thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  handler=info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *magick_restrict info;

  /*
    Return the pixels staged in the given cache nexus, or NULL when the
    cache has no defined storage class yet.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class == UndefinedClass)
    return((PixelPacket *) NULL);
  return((const PixelPacket *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Blend pixel p (weight alpha) with pixel q (weight beta) and store the
  result in *composite.  When p is fully transparent the result is q
  unchanged.  NOTE(review): MagickOver_ presumably implements the Porter-
  Duff "over" operator -- confirm against its definition elsewhere in the
  file.
*/
static inline void ApplyPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;

  if (fabs(alpha-TransparentOpacity) < MagickEpsilon)
    {
      *composite=(*q);
      return;
    }
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  /* PerceptibleReciprocal() guards the division against gamma ~ 0. */
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  /* The index channel carries black (K) only when both pixels are CMYK. */
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickPixelPacket
    alpha,
    beta;

  MagickSizeType
    number_pixels;

  NexusInfo
    **magick_restrict mask_nexus;

  register const PixelPacket
    *magick_restrict r;

  register IndexPacket
    *magick_restrict nexus_indexes,
    *magick_restrict indexes;

  register PixelPacket
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    i;

  /*
    Apply clip mask.  Blends the just-written nexus pixels (q) with the
    authentic pixels underneath (p), weighted per-pixel by the intensity of
    the image mask (r) over the same region.  Returns MagickTrue on success
    or when there is nothing to do; MagickFalse on failure.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->mask == (Image *) NULL) || (image->storage_class == PseudoClass))
    return(MagickTrue);
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* Temporary nexus used only to read the mask image's pixels. */
  mask_nexus=AcquirePixelCacheNexus(1);
  /* p: the authentic (underlying) pixels for the nexus region. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  indexes=nexus_info->virtual_nexus->indexes;
  /* q: the staged pixels being masked, updated in place. */
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* r: the mask pixels; errors are reported into the image's exception. */
  r=GetVirtualPixelCacheNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,mask_nexus[0],&image->exception);
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    /* Bail out of the loop (and fail below) if either read failed. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    /* alpha <- authentic pixel, beta <- staged pixel. */
    SetMagickPixelPacket(image,p,indexes+i,&alpha);
    SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
    /* Composite staged over authentic, weighted by the mask intensity. */
    ApplyPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
      alpha.opacity,&beta);
    SetPixelRed(q,ClampToQuantum(beta.red));
    SetPixelGreen(q,ClampToQuantum(beta.green));
    SetPixelBlue(q,ClampToQuantum(beta.blue));
    SetPixelOpacity(q,ClampToQuantum(beta.opacity));
    if (cache_info->active_index_channel != MagickFalse)
      SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
    p++;
    q++;
    r++;
  }
  mask_nexus=DestroyPixelCacheNexus(mask_nexus,1);
  /* An early break means not every pixel was masked: report failure. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Open (or reuse) the disk-backed pixel cache file in the requested mode.
  Allocates a unique cache filename on first use, accounts the open file
  against the FileResource limit, and closes any previously open descriptor
  before installing the new one.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to opening an existing
           cache file for write. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Same create-then-open fallback, but read/write. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
/*
  Write `length` bytes from `buffer` to the cache file at `offset`,
  retrying on partial writes and EINTR.  Uses pwrite() when available so
  the file position is untouched; otherwise seeks first.  Returns the
  number of bytes actually written (which is less than `length` on a
  non-EINTR error), or -1 if the initial lseek() fails.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Write in chunks no larger than SSIZE_MAX per call. */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* Retry on EINTR; any other failure ends the loop early. */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  Ensure the disk cache file is at least `length` bytes long.  If the file
  is already big enough this is a no-op; otherwise a single byte is written
  at offset length-1 to extend it (optionally followed by
  posix_fallocate() to reserve the blocks).  The file position is rewound
  to the start on success.  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Current file size = offset of end-of-file. */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;
  else
    {
      /* Extend by writing one byte at the last requested position. */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  OpenPixelCache() opens (or re-opens) the pixel cache for the given image,
  choosing a backing store in priority order: ping (no pixels), anonymous
  memory, distributed cache server, memory-mapped disk file, plain disk file.
  Returns MagickTrue on success; on failure the cache type is reset to
  UndefinedCache and an exception is raised.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
source_info;
char
format[MaxTextExtent],
message[MaxTextExtent];
const char
*hosts,
*type;
MagickSizeType
length,
number_pixels;
MagickStatusType
status;
size_t
columns,
packet_size;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Lazily resolve the policy for anonymous mapping once per process;
   cache_anonymous_memory < 0 means "not yet determined". */
if (cache_anonymous_memory < 0)
{
char
*value;
/*
Does the security policy require anonymous mapping for pixel cache?
*/
cache_anonymous_memory=0;
value=GetPolicyValue("pixel-cache-memory");
if (value == (char *) NULL)
value=GetPolicyValue("cache:memory-map");
if (LocaleCompare(value,"anonymous") == 0)
{
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
cache_anonymous_memory=1;
#else
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
"'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
}
value=DestroyString(value);
}
if ((image->columns == 0) || (image->rows == 0))
ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
/* Enforce resource-limit policy before committing any storage. */
if (((MagickSizeType) image->columns > cache_info->width_limit) ||
((MagickSizeType) image->rows > cache_info->height_limit))
ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
image->filename);
length=GetImageListLength(image);
if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
image->filename);
/* Snapshot the current cache state so an existing cache can be cloned
   into the new one (and later relinquished). */
source_info=(*cache_info);
source_info.file=(-1);
(void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
image->filename,(double) image->scene);
cache_info->mode=mode;
cache_info->rows=image->rows;
cache_info->columns=image->columns;
cache_info->channels=image->channels;
cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
(image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
packet_size=sizeof(PixelPacket);
if (cache_info->active_index_channel != MagickFalse)
packet_size+=sizeof(IndexPacket);
length=number_pixels*packet_size;
/* Recover columns from the product; a mismatch indicates the
   rows*columns*packet_size multiplication overflowed. */
columns=(size_t) (length/cache_info->rows/packet_size);
if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
((ssize_t) cache_info->rows < 0))
ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
image->filename);
cache_info->length=length;
/* Ping mode: record geometry/metadata but allocate no pixel storage. */
if (image->ping != MagickFalse)
{
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->type=PingCache;
return(MagickTrue);
}
status=AcquireMagickResource(AreaResource,(MagickSizeType)
cache_info->columns*cache_info->rows);
/* Persistent caches must live on disk; force the non-memory path. */
if (cache_info->mode == PersistMode)
status=MagickFalse;
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if ((status != MagickFalse) &&
(length == (MagickSizeType) ((size_t) length)) &&
((cache_info->type == UndefinedCache) ||
(cache_info->type == MemoryCache)))
{
status=AcquireMagickResource(MemoryResource,cache_info->length);
if (status != MagickFalse)
{
status=MagickTrue;
if (cache_anonymous_memory <= 0)
{
cache_info->mapped=MagickFalse;
cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
AcquireAlignedMemory(1,(size_t) cache_info->length));
}
else
{
cache_info->mapped=MagickTrue;
cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
cache_info->length);
}
if (cache_info->pixels == (PixelPacket *) NULL)
{
/* Allocation failed: restore the previous pixel store and fall
   through to the distributed/disk paths below. */
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
}
else
{
/*
Create memory pixel cache.
*/
cache_info->colorspace=image->colorspace;
cache_info->type=MemoryCache;
cache_info->indexes=(IndexPacket *) NULL;
/* Indexes live immediately after the pixel data in one allocation. */
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status&=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
type,(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
cache_info->storage_class=image->storage_class;
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
status=AcquireMagickResource(DiskResource,cache_info->length);
hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
exception);
/* No local disk budget but remote hosts configured: try a distributed
   pixel cache server. */
if ((status == MagickFalse) && (hosts != (const char *) NULL))
{
DistributeCacheInfo
*server_info;
/*
Distribute the pixel cache to a remote server.
*/
server_info=AcquireDistributeCacheInfo(exception);
if (server_info != (DistributeCacheInfo *) NULL)
{
status=OpenDistributePixelCache(server_info,image);
if (status == MagickFalse)
{
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
GetDistributeCacheHostname(server_info));
server_info=DestroyDistributeCacheInfo(server_info);
}
else
{
/*
Create a distributed pixel cache.
*/
status=MagickTrue;
cache_info->type=DistributedCache;
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
cache_info->server_info=server_info;
(void) FormatLocaleString(cache_info->cache_filename,
MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
(DistributeCacheInfo *) cache_info->server_info),
GetDistributeCachePort((DistributeCacheInfo *)
cache_info->server_info));
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,
format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,GetDistributeCacheFile(
(DistributeCacheInfo *) cache_info->server_info),type,
(double) cache_info->columns,(double) cache_info->rows,
format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/*
Create pixel cache on disk.
*/
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/* Discard any stale non-persistent disk cache before re-opening. */
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
(cache_info->mode != PersistMode))
{
(void) ClosePixelCacheOnDisk(cache_info);
*cache_info->cache_filename='\0';
}
if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
image->filename);
return(MagickFalse);
}
status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
cache_info->length);
if (status == MagickFalse)
{
cache_info->type=UndefinedCache;
ThrowFileException(exception,CacheError,"UnableToExtendCache",
image->filename);
return(MagickFalse);
}
cache_info->storage_class=image->storage_class;
cache_info->colorspace=image->colorspace;
/* Prefer mapping the disk file into memory when it fits in a size_t and
   the Map resource budget allows it; otherwise use plain DiskCache I/O. */
length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
if (length != (MagickSizeType) ((size_t) length))
cache_info->type=DiskCache;
else
{
status=AcquireMagickResource(MapResource,cache_info->length);
if (status == MagickFalse)
cache_info->type=DiskCache;
else
if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
{
cache_info->type=DiskCache;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
cache_info->offset,(size_t) cache_info->length);
if (cache_info->pixels == (PixelPacket *) NULL)
{
/* mmap failed: fall back to DiskCache and restore prior pixels. */
cache_info->type=DiskCache;
cache_info->mapped=source_info.mapped;
cache_info->pixels=source_info.pixels;
RelinquishMagickResource(MapResource,cache_info->length);
}
else
{
/*
Create file-backed memory-mapped pixel cache.
*/
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->type=MapCache;
cache_info->mapped=MagickTrue;
cache_info->indexes=(IndexPacket *) NULL;
if (cache_info->active_index_channel != MagickFalse)
cache_info->indexes=(IndexPacket *) (cache_info->pixels+
number_pixels);
if ((source_info.storage_class != UndefinedClass) &&
(mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,
exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickTrue,
format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",
cache_info->filename,cache_info->cache_filename,
cache_info->file,type,(double) cache_info->columns,
(double) cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
}
}
/* Plain disk cache path. */
status=MagickTrue;
if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
{
status=ClonePixelCacheRepository(cache_info,&source_info,exception);
RelinquishPixelCachePixels(&source_info);
}
if (image->debug != MagickFalse)
{
(void) FormatMagickSize(cache_info->length,MagickFalse,format);
type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
cache_info->type);
(void) FormatLocaleString(message,MaxTextExtent,
"open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,type,(double)
cache_info->columns,(double) cache_info->rows,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (status == 0)
{
cache_info->type=UndefinedCache;
return(MagickFalse);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent
% pixel cache rather than initializing a new one.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info,
*magick_restrict clone_info;
MagickBooleanType
status;
ssize_t
page_size;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (void *) NULL);
assert(filename != (const char *) NULL);
assert(offset != (MagickOffsetType *) NULL);
page_size=GetMagickPageSize();
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/* Ensure any GPU-side pixel data is synchronized back before persisting. */
CopyOpenCLBuffer(cache_info);
#endif
if (attach != MagickFalse)
{
/*
Attach existing persistent pixel cache.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CacheEvent,GetMagickModule(),
"attach persistent cache");
(void) CopyMagickString(cache_info->cache_filename,filename,
MaxTextExtent);
/* Map the already-on-disk cache at the caller-supplied offset. */
cache_info->type=MapCache;
cache_info->offset=(*offset);
if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
return(MagickFalse);
/* Advance the caller's offset past this cache, rounded up to the next
   page boundary so the following cache is page-aligned. */
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
return(MagickTrue);
}
/*
Clone persistent pixel cache.
*/
status=AcquireMagickResource(DiskResource,cache_info->length);
if (status == MagickFalse)
{
(void) ThrowMagickException(exception,GetMagickModule(),CacheError,
"CacheResourcesExhausted","`%s'",image->filename);
return(MagickFalse);
}
/* Build a disk-backed clone configured for PersistMode, copy the pixel
   repository into it, then destroy the clone descriptor (the file stays). */
clone_info=(CacheInfo *) ClonePixelCache(cache_info);
clone_info->type=DiskCache;
(void) CopyMagickString(clone_info->cache_filename,filename,MaxTextExtent);
clone_info->file=(-1);
clone_info->storage_class=cache_info->storage_class;
clone_info->colorspace=cache_info->colorspace;
clone_info->columns=cache_info->columns;
clone_info->rows=cache_info->rows;
clone_info->active_index_channel=cache_info->active_index_channel;
clone_info->mode=PersistMode;
clone_info->length=cache_info->length;
clone_info->channels=cache_info->channels;
clone_info->offset=(*offset);
status=ClonePixelCacheRepository(clone_info,cache_info,exception);
*offset+=cache_info->length+page_size-(cache_info->length % page_size);
clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  PixelPacket
    *pixels;

  /*
    Convenience wrapper: forward the request unchanged to
    QueueAuthenticPixelCacheNexus().
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,
    nexus_info,exception);
  return(pixels);
}
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.  A zero-sized request (columns or rows
    == 0) is rejected up front: previously `(rows-1)' wrapped around as an
    unsigned value and produced a bogus end-of-region offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) ||
      (columns == 0) || (rows == 0) || (x < 0) || (y < 0) ||
      (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  /*
    Reject regions whose last pixel falls outside the cache extent.
  */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates an region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  PixelPacket
    *pixels;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region through this OpenMP thread's dedicated
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  QueueAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region: delegate to an installed
    queue_authentic_pixels handler when one exists, otherwise fall back to
    the per-thread cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.queue_authentic_pixels_handler;
  if (handler != (QueueAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    processed;

  ssize_t
    count;

  /*
    Read `length' bytes at `offset' from the disk cache into `buffer',
    retrying on EINTR and looping over short reads.  Returns the number of
    bytes actually read (or -1 when the initial seek fails).
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  processed=0;
  while (processed < (MagickOffsetType) length)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+processed,(size_t) MagickMin(length-
      processed,(size_t) SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+processed,(size_t) MagickMin(length-
      processed,(size_t) SSIZE_MAX),offset+processed);
#endif
    if (count <= 0)
      {
        if (errno != EINTR)
          break;  /* hard error or EOF: report bytes read so far */
        count=0;  /* interrupted system call: retry */
      }
    processed+=count;
  }
  return(processed);
}
static MagickBooleanType ReadPixelCacheIndexes(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register IndexPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Read colormap indexes for the nexus region from the pixel cache into
    the nexus staging buffer.  Returns MagickFalse when there is no index
    channel or a row could not be read.  (Fix: the distributed-cache call
    passed a garbled `&region' argument.)
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict p;

      /*
        Read indexes from memory.  A full-width region is collapsed into a
        single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk; indexes are stored after all pixel packets.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache, one row per request unless the
        region spans full cache width.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Leaving the loop early (y < rows) means some row failed to read. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Read pixels for the nexus region from the pixel cache into the nexus
    staging buffer.  Returns MagickFalse on arithmetic overflow or a failed
    row read.  (Fix: the distributed-cache call passed a garbled `&region'
    argument.)
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);  /* y*columns overflowed */
  offset+=nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  if ((length/sizeof(PixelPacket)) != nexus_info->region.width)
    return(MagickFalse);  /* width*sizeof(PixelPacket) overflowed */
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);  /* empty region or length*rows overflowed */
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict p;

      /*
        Read pixels from memory.  A full-width region is collapsed into a
        single contiguous copy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if (count < (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache, one row per request unless the
        region spans full cache width.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Leaving the loop early (y < rows) means some row failed to read. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the cache reference count under its semaphore and return the
    cache.  Fixed the NULL comparison to cast to (Cache), not (Cache *):
    `cache' is itself a Cache, matching the convention used elsewhere in
    this file (e.g. the asserts in QueueAuthenticPixelCacheNexus).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
/* Reset the module-scope cache epoch counter (declared elsewhere in this
   file) back to zero. */
cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  For every handler, install the caller's
    handler only when the caller actually supplied one (non-NULL), leaving
    the existing handler in place otherwise.  Fixed: the one-virtual-pixel
    guard previously inspected the *currently installed* handler
    (cache_info->methods) instead of the incoming one (cache_methods),
    which could overwrite a valid handler with NULL; it now mirrors the
    one-authentic-pixel branch.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% PixelPacket *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate (or anonymously map) a staging buffer of `length` bytes for the
    cache nexus.  Returns MagickFalse, with an exception raised, when the
    request cannot be represented as a size_t or the allocation fails.
  */
  if (((MagickSizeType) ((size_t) length)) != length)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /*
        Anonymous memory-map the staging area.
      */
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (PixelPacket *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /*
        Heap-allocate an aligned, zero-initialized staging area.
      */
      nexus_info->cache=(PixelPacket *) MagickAssumeAligned(
        AcquireAlignedMemory(1,(size_t) length));
      if (nexus_info->cache != (PixelPacket *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the nexus pixels: read-shared for ReadMode,
    read/write for any other mode.  Regions smaller than a cache line are
    not worth prefetching.
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  if (mode != ReadMode)
    {
      MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,
        1,1);
      return;
    }
  MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,0,1);
}
static PixelPacket *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the nexus to the requested region: either point it directly at the
    in-core cache pixels (zero-copy), or (re)allocate a private staging
    buffer that is later synced back to the cache.  Returns the pixel
    pointer, or NULL with an exception raised.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only when the region lies fully inside the
        cache and is addressable as one contiguous span: either a run of
        full-width rows, or a single partial row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((PixelPacket *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /* reserve at least one full cache row/column so the buffer can be reused */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  status=MagickTrue;
  if (nexus_info->cache == (PixelPacket *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* existing staging buffer is too small: release and reacquire */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    {
      (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
      return((PixelPacket *) NULL);
    }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  /* indexes live immediately after the pixels in the staging buffer */
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
/*
  SetCacheAlphaChannel() enables the matte channel and assigns the given
  opacity to every pixel of the image, processing rows in parallel through
  a cache view.  Returns MagickTrue on success.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior row failure short-circuits the remaining iterations */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Install the new virtual-pixel method and return the previous one.  Some
    methods additionally require a matte channel or an RGB colorspace; those
    side effects are applied below.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* a non-opaque background needs a matte channel on the image */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        /* a colored background cannot be represented in a gray colorspace */
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        /* NOTE(review): enables matte with existing pixels made opaque;
           confirm OpaqueOpacity (not TransparentOpacity) is the intended
           fill here -- virtual pixels outside the canvas are handled by the
           virtual-pixel lookup, not by this fill. */
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() ensures all the OpenCL operations have been
% completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  CopyOpenCLBuffer() waits for any outstanding OpenCL events on the cache's
  device buffer, maps the buffer back into host memory so the CPU view of
  the pixels is current, then releases the OpenCL cache state.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  MagickCLEnv
    clEnv;

  assert(cache_info != (CacheInfo *)NULL);
  /* only in-memory caches can be backed by an OpenCL buffer */
  if ((cache_info->type != MemoryCache) ||
      (cache_info->opencl == (OpenCLCacheInfo *)NULL))
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl != (OpenCLCacheInfo *)NULL)  /* re-check under lock */
    {
      cl_event
        *events;

      cl_uint
        event_count;

      clEnv=GetDefaultOpenCLEnv();
      events=CopyOpenCLEvents(cache_info->opencl,&event_count);
      if (events != (cl_event *) NULL)
        {
          cl_command_queue
            queue;

          cl_context
            context;

          cl_int
            status;

          PixelPacket
            *pixels;

          /* blocking map: waits on `events`, then exposes the buffer to the
             host */
          context=GetOpenCLContext(clEnv);
          queue=AcquireOpenCLCommandQueue(clEnv);
          pixels=(PixelPacket *) clEnv->library->clEnqueueMapBuffer(queue,
            cache_info->opencl->buffer,CL_TRUE, CL_MAP_READ | CL_MAP_WRITE,0,
            cache_info->length,event_count,events,NULL,&status);
          /* the mapping must come back at the original host address */
          assert(pixels == cache_info->pixels);
          events=(cl_event *) RelinquishMagickMemory(events);
          RelinquishOpenCLCommandQueue(clEnv,queue);
        }
      cache_info->opencl=RelinquishOpenCLCacheInfo(clEnv,cache_info->opencl);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
}
/*
  SyncAuthenticOpenCLBuffer() flushes the image's OpenCL pixel buffer back
  to host memory (see CopyOpenCLBuffer).
*/
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  assert(image != (Image *)NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply the clip mask (DirectClass only) before committing pixels */
  if ((image->storage_class == DirectClass) &&
      (image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  /* likewise apply the composite mask */
  if ((image->storage_class == DirectClass) &&
      (image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* nexus points directly into the cache: nothing to copy back */
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* colormap indexes are written separately when active */
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() commits the authentic pixel region acquired by
  the calling thread's nexus back to the pixel cache.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixels() flushes the calling thread's authentic pixel region
  to the in-memory or disk cache, delegating to a registered sync handler
  when one is installed.
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagePixelCache() validates the image pixel cache, returning
  MagickTrue when the cache is available and MagickFalse otherwise.
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (GetImagePixelCache(image,MagickTrue,exception) == (Cache) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheIndexes() writes the colormap indexes of the nexus region
  to the pixel cache backend (memory/map, disk, or distributed server).
  Returns MagickTrue on success, MagickFalse with an exception on failure.

  Fix: the distributed-cache call passed the mojibake token `®ion`
  (a mis-encoded `&region`), which does not compile; restored `&region`.
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  /* the nexus points directly into the cache: nothing to write back */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *magick_restrict q;

      /*
        Write indexes to memory.  A full-width region can be copied as one
        contiguous span instead of row by row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.  On-disk layout is all pixels first, then all
        indexes, hence the extent*sizeof(PixelPacket) base offset.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one row at a time unless the
        whole region is contiguous and small enough for a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows, signalling a short write */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes the pixels of the nexus region to the
  pixel cache backend (memory/map, disk, or distributed server).  Returns
  MagickTrue on success, MagickFalse with an exception on failure.

  Fix: the distributed-cache call passed the mojibake token `®ion`
  (a mis-encoded `&region`), which does not compile; restored `&region`.
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /* the nexus points directly into the cache: nothing to write back */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Write pixels to memory.  A full-width region can be copied as one
        contiguous span instead of row by row.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if (count < (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row at a time unless the
        whole region is contiguous and small enough for a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* an early break above leaves y < rows, signalling a short write */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
transpose.h | // This code is part of the Problem Based Benchmark Suite (PBBS)
// Copyright (c) 2011-2016 Guy Blelloch and the PBBS team
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights (to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#pragma once
#include "utilities.h"
#include "get_time.h"
namespace pbbs {
constexpr const size_t TRANS_THRESHHOLD = PAR_GRANULARITY/4;
// Split point for the recursive transposes: plain halving.
inline size_t split(size_t n) {
  return n >> 1;
  //return ((((size_t) 1) << log2_up(n) != n) ? n/2 : (7*(n+1))/16);
}
// Cache-oblivious out-of-place matrix transpose: B[j][i] = A[i][j].
// rLength and cLength are the leading dimensions (row strides) of A and B.
template <class E>
struct transpose {
  E *A, *B;
  transpose(E *AA, E *BB) : A(AA), B(BB) {}

  // Recursively transpose the (rCount x cCount) tile at (rStart, cStart).
  void transR(size_t rStart, size_t rCount, size_t rLength,
	      size_t cStart, size_t cCount, size_t cLength) {
    if (cCount*rCount < TRANS_THRESHHOLD) {
      // base case: transpose the tile sequentially
      for (size_t i=rStart; i < rStart+ rCount; i++)
	for (size_t j=cStart; j < cStart + cCount; j++)
	  B[j*cLength + i] = A[i*rLength + j];
    } else if (cCount > rCount) {
      // split the wider dimension (columns) and recurse in parallel
      size_t l1 = split(cCount);
      size_t l2 = cCount - l1;
      auto left = [&] () {
	transR(rStart,rCount,rLength,cStart,l1,cLength);};
      auto right = [&] () {
	transR(rStart,rCount,rLength,cStart + l1,l2,cLength);};
      par_do(left, right);
    } else {
      // split rows.  Bug fix: the original computed l1 = split(cCount)
      // here; the partition still covered all rows (l1 + (rCount-l1) ==
      // rCount) but was badly unbalanced whenever rCount >> cCount.
      size_t l1 = split(rCount);
      size_t l2 = rCount - l1;
      auto left = [&] () {
	transR(rStart,l1,rLength,cStart,cCount,cLength);};
      auto right = [&] () {
	transR(rStart + l1,l2,rLength,cStart,cCount,cLength);};
      par_do(left, right);
    }
  }

  void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
    transR(0,rCount,cCount,0,cCount,rCount);
  }
};
// Cache-oblivious transpose of variable-length segments: the segment of A
// starting at OA[i*rLength + j] is moved to B starting at OB[j*cLength + i];
// segment lengths come from consecutive entries of OA.
template <class E, class int_t>
struct blockTrans {
  E *A, *B;
  int_t *OA, *OB;
  blockTrans(E *AA, E *BB, int_t *OOA, int_t *OOB)
    : A(AA), B(BB), OA(OOA), OB(OOB) {}

  void transR(size_t rStart, size_t rCount, size_t rLength,
	      size_t cStart, size_t cCount, size_t cLength) {
    if (cCount*rCount < TRANS_THRESHHOLD*16) {
      // base case: move each (i,j) segment, parallelizing over rows
      parallel_for(rStart, rStart+rCount, [&] (size_t i) {
	for (size_t j=cStart; j < cStart + cCount; j++) {
	  size_t sa = OA[i*rLength + j];
	  size_t sb = OB[j*cLength + i];
	  size_t l = OA[i*rLength + j + 1] - sa;
	  for (size_t k =0; k < l; k++)
	    move_uninitialized(B[k+sb], A[k+sa]);
	}
      });
    } else if (cCount > rCount) {
      // split the wider dimension (columns) and recurse in parallel
      size_t l1 = split(cCount);
      size_t l2 = cCount - l1;
      auto left = [&] () {
	transR(rStart,rCount,rLength,cStart,l1,cLength);};
      auto right = [&] () {
	transR(rStart,rCount,rLength,cStart + l1,l2,cLength);};
      par_do(left, right);
    } else {
      // split rows.  Bug fix: the original computed l1 = split(cCount)
      // here; the partition still covered all rows but was badly
      // unbalanced whenever rCount >> cCount.
      size_t l1 = split(rCount);
      size_t l2 = rCount - l1;
      auto left = [&] () {
	transR(rStart,l1,rLength,cStart,cCount,cLength);};
      auto right = [&] () {
	transR(rStart + l1,l2,rLength,cStart,cCount,cLength);};
      par_do(left, right);
    }
  }

  void trans(size_t rCount, size_t cCount) {
#if defined(OPENMP)
#pragma omp parallel
#pragma omp single
#endif
    transR(0,rCount,cCount,0,cCount,rCount);
  }
};
// Moves values from blocks to buckets
// From is sorted by key within each block, in block major
// counts is the # of keys in each bucket for each block, in block major
// From and To are of lenght n
// counts is of length num_blocks * num_buckets
// Data is memcpy'd into To avoiding initializers and overloaded =
// Moves keys from sorted blocks into their destination buckets and returns
// a new array of length num_buckets+1 holding the bucket start offsets
// (the last entry is n).  `counts` is consumed by this routine.
// Fix: corrected the error-message typo "power or 2" -> "power of 2".
template<typename E, typename s_size_t>
size_t* transpose_buckets(E* From, E* To, s_size_t* counts, size_t n,
			  size_t block_size,
			  size_t num_blocks, size_t num_buckets) {
  timer t("transpose", false);
  size_t m = num_buckets * num_blocks;
  sequence<s_size_t> dest_offsets; //(m)
  auto add = addm<s_size_t>();
  // for smaller input do non-cache-oblivious version
  if (n < (1 << 22) || num_buckets <= 512 || num_blocks <= 512) {
    size_t block_bits = log2_up(num_blocks);
    size_t block_mask = num_blocks-1;
    if ((size_t) 1 << block_bits != num_blocks) {
      cout << "in transpose_buckets: num_blocks must be a power of 2"
	   << endl;
      abort();
    }
    // determine the destination offsets: read counts in bucket-major order
    // (a transposed view) so the prefix scan yields per-bucket destinations
    auto get = [&] (size_t i) {
      return counts[(i>>block_bits) + num_buckets*(i&block_mask)];};
    dest_offsets = sequence<s_size_t>(m, get);
    size_t sum = scan_inplace(dest_offsets.slice(), add);
    if (sum != n) abort();  // counts must total exactly n
    t.next("seq and scan");
    // send each key to the correct location within its bucket
    auto f = [&] (size_t i) {
      size_t s_offset = i * block_size;
      for (size_t j= 0; j < num_buckets; j++) {
	size_t d_offset = dest_offsets[i+ num_blocks*j];
	size_t len = counts[i*num_buckets+j];
	for (size_t k =0; k < len; k++)
	  move_uninitialized(To[d_offset++], From[s_offset++]);
      }
    };
    parallel_for(0, num_blocks, f, 1);
    t.next("trans");
    free_array(counts);
  } else { // for larger input do cache efficient transpose
    sequence<s_size_t> source_offsets(counts,m);
    dest_offsets = sequence<s_size_t>(m);
    size_t total;
    transpose<s_size_t>(counts, dest_offsets.begin()).trans(num_blocks,
							    num_buckets);
    t.next("trans 1");
    // do both scans inplace
    total = scan_inplace(dest_offsets.slice(), add);
    if (total != n) abort();
    total = scan_inplace(source_offsets.slice(), add);
    if (total != n) abort();
    // NOTE(review): source_offsets was constructed with length m, yet index
    // m is written here; this relies on the underlying counts buffer having
    // m+1 slots -- confirm against the allocator in the caller.
    source_offsets[m] = n;
    t.next("scans");
    blockTrans<E,s_size_t>(From, To, source_offsets.begin(),
			   dest_offsets.begin()).trans(num_blocks, num_buckets);
    t.next("trans 2");
  }
  size_t *bucket_offsets = new_array_no_init<size_t>(num_buckets+1);
  for (s_size_t i=0; i < num_buckets; i++)
    bucket_offsets[i] = dest_offsets[i*num_blocks];
  // last element is the total size n
  bucket_offsets[num_buckets] = n;
  return bucket_offsets;
}
}
|
dertransp_parallel.c | /* Inputs: prhs[0] = dA/dx corresponding to a matrix A of size (nrow x ncol)
* prhs[1] = nrow of matrix A
* Outputs: plhs[0] = dA'/dx corresponding to transposed matrix A'
* Method: This is just a row permutation of dA/dx
*
* by SeHyoun Ahn, Aug 2016
*/
#include <omp.h>
#include "mex.h"
#include <stdlib.h>
#include <time.h>
/*
 * Insertion sort of the paired arrays (irs, prs) by row index irs,
 * keeping each value prs[k] attached to its index irs[k].
 *
 * Fix: the original inner loop `for (j=i; j>=0; --j)` can never terminate
 * at j==0 because mwIndex is unsigned -- after the j==0 branch (which had
 * no break) `--j` wraps around and the next iteration reads irs[j-1] far
 * out of bounds whenever an element belongs at the front of the array.
 * The loop below uses `j > 0` together with the comparison as the guard.
 */
void insertsort(mwIndex *irs, double *prs, mwSize n) {
    mwIndex i, j;
    mwIndex swapind;
    double swapval;
    for (i=1; i<n; ++i) {
        swapind = irs[i];
        swapval = prs[i];
        /* shift larger entries right until the insertion point is found */
        for (j=i; j>0 && swapind<irs[j-1]; --j) {
            irs[j] = irs[j-1];
            prs[j] = prs[j-1];
        }
        irs[j] = swapind;
        prs[j] = swapval;
    }
}
/*
 * Quicksort of the paired arrays (irs, prs) by row index irs, using a
 * randomized median-of-three pivot.  Sub-ranges of 17 elements or fewer
 * fall back to insertsort, matching the thresholds used by the callers.
 *
 * Fix: the original pivot-selection code permuted irs[] entries without
 * moving the matching prs[] entries, which desynchronized the value array
 * from the row indexes.  Every swap below moves both arrays together.
 */
void quicksort(mwIndex* irs, double* prs, mwSize n) {
    mwIndex front, back;
    mwIndex pivot;        /* pivot VALUE (a row index), not a position */
    mwIndex swapind;
    double swapval;
    mwIndex p0, p1, p2, med;

    /* median-of-three over three random positions */
    p0 = rand()%n;
    p1 = rand()%n;
    p2 = rand()%n;
    if ((irs[p0] <= irs[p1]) == (irs[p1] <= irs[p2]))
        med = p1;
    else if ((irs[p1] <= irs[p0]) == (irs[p0] <= irs[p2]))
        med = p0;
    else
        med = p2;
    /* move the pivot PAIR to slot 0; prs moves together with irs */
    swapind = irs[med];
    swapval = prs[med];
    irs[med] = irs[0];
    prs[med] = prs[0];
    irs[0] = swapind;
    prs[0] = swapval;
    pivot = irs[0];

    /* partition irs[1..n-1] around the pivot value */
    front = 1;
    back = n-1;
    while (front < back) {
        if (irs[front] < pivot) {
            ++front;
        }
        else if (irs[back] > pivot) {
            --back;
        }
        else {
            swapind = irs[back];
            swapval = prs[back];
            irs[back] = irs[front];
            prs[back] = prs[front];
            irs[front]= swapind;
            prs[front]= swapval;
            ++front;
        }
    }
    /* place the pivot pair at its final position, then recurse both sides */
    if (irs[front] < pivot) {
        swapind = irs[front];
        swapval = prs[front];
        irs[front] = irs[0];
        prs[front] = prs[0];
        irs[0] = swapind;
        prs[0] = swapval;
        if (front > 17)
            quicksort(&irs[0],&prs[0],front);
        else if (front > 1)
            insertsort(&irs[0],&prs[0],front);
        if ((n-1-front) > 17)
            quicksort(&irs[front+1],&prs[front+1],n-1-front);
        else if ((n-1-front) > 1)
            insertsort(&irs[front+1],&prs[front+1],n-1-front);
    }
    else {
        swapind = irs[front-1];
        swapval = prs[front-1];
        irs[front-1] = irs[0];
        prs[front-1] = prs[0];
        irs[0] = swapind;
        prs[0] = swapval;
        if (front-1 > 17)
            quicksort(&irs[0],&prs[0],front-1);
        else if (front-1 > 1)
            insertsort(&irs[0],&prs[0],front-1);
        if (n-front > 17)
            quicksort(&irs[front],&prs[front],n-front);
        else if (n-front > 1)
            insertsort(&irs[front],&prs[front],n-front);
    }
}
/*
 * MEX gateway: permute the row indices of a sparse matrix.
 *
 * prhs[0]: sparse (nrow*ncol x nderiv) matrix A in CSC form.
 * prhs[1]: scalar nrow, the row count of the implied reshaping.
 * plhs[0]: sparse matrix with the same nonzeros where each row index r is
 *          remapped to (r % nrow)*ncol + r/nrow (a transpose-style
 *          permutation of the reshaped rows); each column is then re-sorted
 *          to restore the CSC "ascending row indices" invariant.
 */
void mexFunction(int nlhs, mxArray *plhs[],int nrhs,const mxArray *prhs[])
{
srand(time(NULL)); /* seed the randomized quicksort pivot choices */
mwSize nrow, ncol, nderiv, nnz;
/* Read in the sparse input matrix */
mwIndex *irsA, *jcsA;
double *prA;
nderiv = mxGetN(prhs[0]);
ncol = mxGetM(prhs[0]);
irsA = mxGetIr(prhs[0]);
jcsA = mxGetJc(prhs[0]);
prA = mxGetPr(prhs[0]);
nnz = jcsA[nderiv]; /* total number of stored nonzeros */
/* Read in the number of rows in the reshaped matrix */
nrow = mxGetScalar(prhs[1]);
ncol = ncol/nrow;
/* Prepare the output matrix (MATLAB owns this memory) */
mwIndex *lirs, *ljcs;
double *lpr;
plhs[0] = mxCreateSparse(nrow*ncol,nderiv,nnz,mxREAL);
lirs = mxGetIr(plhs[0]);
ljcs = mxGetJc(plhs[0]);
lpr = mxGetPr(plhs[0]);
mwIndex i,j; /* removed unused 'tmp' */
ljcs[0]=0;
#pragma omp parallel for default(shared) private(i,j) num_threads(2)
for (i=0; i<nderiv; ++i) {
/* Compute the new row index of every nonzero in column i */
for (j = jcsA[i]; j<jcsA[i+1]; ++j){
lirs[j] = (irsA[j]%nrow)*ncol + irsA[j]/nrow;
lpr[j] = prA[j];
}
ljcs[i+1] = jcsA[i+1];
/* Sort the column to restore sorted CSC format.
 * Fix: bound the range with the READ-ONLY jcsA[] instead of ljcs[];
 * ljcs[i] is written by iteration i-1, which may run concurrently on
 * another thread (data race in the previous version). jcsA[i]==ljcs[i]
 * by construction, so the result is unchanged. */
if ( (jcsA[i+1] - jcsA[i]) > 17)
quicksort(&lirs[jcsA[i]], &lpr[jcsA[i]], jcsA[i+1]-jcsA[i]);
else if ( (jcsA[i+1] - jcsA[i]) > 1)
insertsort(&lirs[jcsA[i]], &lpr[jcsA[i]], jcsA[i+1]-jcsA[i]);
}
}
|
kClistNodeParallel.c | /*
Info:
Feel free to use these lines as you wish.
This program iterates over all k-cliques.
This is an improvement of the 1985 algorithm of Chiba And Nishizeki detailed in "Arboricity and subgraph listing".
To compile:
"gcc kClistNodeParallel.c -O9 -o kClistNodeParallel -fopenmp".
To execute:
"./kClistNodeParallel p k edgelist.txt".
"edgelist.txt" should contain the graph: one edge on each line separated by a space.
k is the size of the k-cliques
p is the number of threads
Will print the number of k-cliques.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <omp.h>
#define NLINKS 100000000 //maximum number of edges for memory allocation, will increase if needed
// One undirected edge (s,t) as read from the input file.
typedef struct {
unsigned s;
unsigned t;
} edge;
// (node id, degree) pair.
// NOTE(review): nodedeg appears unused in this file — confirm before removing.
typedef struct {
unsigned node;
unsigned deg;
} nodedeg ;
// Edge list plus the degeneracy ranking computed by ord_core().
typedef struct {
unsigned n;//number of nodes
unsigned e;//number of edges
edge *edges;//list of edges
unsigned *rank;//ranking of the nodes according to degeneracy ordering
//unsigned *map;//oldID newID correspondance NOT USED IN THIS VERSION
} edgelist;
// Directed (rank-oriented) graph in CSR-like form, built by mkgraph().
typedef struct {
unsigned n;//number of nodes
unsigned *cd;//cumulative degree: (starts with 0) length=n+1
unsigned *adj;//truncated list of neighbors
unsigned core;//core value of the graph
} graph;
// Per-thread scratch structure for the recursive clique enumeration.
typedef struct {
unsigned *n;//n[l]: number of nodes in G_l
unsigned **d;//d[l]: degrees of G_l
unsigned *adj;//truncated list of neighbors
unsigned char *lab;//lab[i] label of node i
unsigned **nodes;//sub[l]: nodes in G_l
unsigned core;//core value of the parent graph (row stride of adj)
} subgraph;
// Release an edgelist and the arrays it owns.
void free_edgelist(edgelist *el){
free(el->rank);
free(el->edges);
free(el);
}
// Release a graph built by mkgraph().
void free_graph(graph *g){
free(g->adj);
free(g->cd);
free(g);
}
// Release a subgraph allocated by allocsub() for clique size k.
void free_subgraph(subgraph *sg, unsigned char k){
unsigned char lvl;
for (lvl=2;lvl<k;lvl++){
free(sg->nodes[lvl]);
free(sg->d[lvl]);
}
free(sg->nodes);
free(sg->d);
free(sg->n);
free(sg->adj);
free(sg->lab);
free(sg);
}
//Compute the maximum of three unsigned integers.
//Fix: made 'static inline' — a bare C99 'inline' provides no external
//definition in this translation unit, so an unoptimized build can fail to
//link when a call is not inlined.
static inline unsigned int max3(unsigned int a,unsigned int b,unsigned int c){
unsigned int m=(a>b) ? a : b;
return (m>c) ? m : c;
}
edgelist* readedgelist(char* input){
unsigned e1=NLINKS;
edgelist *el=malloc(sizeof(edgelist));
FILE *file;
el->n=0;
el->e=0;
file=fopen(input,"r");
el->edges=malloc(e1*sizeof(edge));
while (fscanf(file,"%u %u", &(el->edges[el->e].s), &(el->edges[el->e].t))==2) {//Add one edge
el->n=max3(el->n,el->edges[el->e].s,el->edges[el->e].t);
el->e++;
if (el->e==e1) {
e1+=NLINKS;
el->edges=realloc(el->edges,e1*sizeof(edge));
}
}
fclose(file);
el->n++;
el->edges=realloc(el->edges,el->e*sizeof(edge));
return el;
}
// Rewrite every edge in terms of the degeneracy ranks, oriented from the
// higher-ranked endpoint (s) to the lower-ranked one (t).
void relabel(edgelist *el){
unsigned i;
for (i=0;i<el->e;i++) {
unsigned hi=el->rank[el->edges[i].s];
unsigned lo=el->rank[el->edges[i].t];
if (hi<lo){
unsigned t=hi;
hi=lo;
lo=t;
}
el->edges[i].s=hi;
el->edges[i].t=lo;
}
}
///// CORE ordering /////////////////////
// (key,value) entry stored in the binary heap:
// key = node id, value = current (remaining) degree of that node.
typedef struct {
unsigned key;
unsigned value;
} keyvalue;
// Binary min-heap on 'value'; pt[] maps node id -> position in kv[]
// ((unsigned)-1 when the node is no longer in the heap).
typedef struct {
unsigned n_max; // max number of nodes.
unsigned n; // number of nodes.
unsigned *pt; // pointers to nodes.
keyvalue *kv; // nodes.
} bheap;
// Allocate an empty min-heap able to hold up to n_max nodes.
bheap *construct(unsigned n_max){
bheap *heap=malloc(sizeof(bheap));
unsigned i;
heap->n_max=n_max;
heap->n=0;
heap->pt=malloc(n_max*sizeof(unsigned));
heap->kv=malloc(n_max*sizeof(keyvalue));
for (i=0;i<n_max;i++){
heap->pt[i]=-1;//(unsigned)-1 marks "not in heap"
}
return heap;
}
// Exchange heap slots i and j, keeping pt[] (node id -> heap position)
// coherent with kv[].
void swap(bheap *heap,unsigned i, unsigned j) {
keyvalue kv_tmp=heap->kv[i];
unsigned pt_tmp=heap->pt[kv_tmp.key];// old position of i's key
heap->pt[heap->kv[i].key]=heap->pt[heap->kv[j].key];// i's key now maps to j's old slot
heap->kv[i]=heap->kv[j];
heap->pt[heap->kv[j].key]=pt_tmp;// kv[j] still holds j's old entry here
heap->kv[j]=kv_tmp;
}
// Sift the entry at position i up towards the root while it is strictly
// smaller than its parent.
void bubble_up(bheap *heap,unsigned i) {
while (i>0) {
unsigned parent=(i-1)/2;
if (heap->kv[parent].value<=heap->kv[i].value) break;
swap(heap,i,parent);
i=parent;
}
}
// Sift the root down: repeatedly swap with the smaller child while the
// min-heap property is violated.
void bubble_down(bheap *heap) {
unsigned i=0;
for (;;) {
unsigned l=2*i+1,r=l+1,smaller;
if (l>=heap->n) break;
smaller=l;
if (r<heap->n && heap->kv[r].value<heap->kv[l].value) smaller=r;
if (heap->kv[smaller].value>=heap->kv[i].value) break;
swap(heap,i,smaller);
i=smaller;
}
}
// Append a (node,degree) entry and restore the heap property.
void insert(bheap *heap,keyvalue kv){
unsigned pos=heap->n++;
heap->kv[pos]=kv;
heap->pt[kv.key]=pos;
bubble_up(heap,pos);
}
// Decrement the degree of 'key' (a neighbour of a just-removed node) if it
// is still in the heap, then restore the heap property upward.
// Note: i is unsigned; "i!=-1" works because -1 converts to UINT_MAX, the
// "not in heap" sentinel set by construct()/popmin().
void update(bheap *heap,unsigned key){
unsigned i=heap->pt[key];
if (i!=-1){
((heap->kv[i]).value)--;
bubble_up(heap,i);
}
}
// Remove and return the entry with minimum value (degree).
// NOTE(review): when the popped entry is the last one, kv[0]=kv[--n] copies
// the popped entry onto itself and "pt[kv[0].key]=0" re-points the removed
// key back to slot 0, undoing the -1 sentinel. Harmless for ord_core (each
// node is popped exactly once and the heap is then empty), but confirm
// before reusing this heap elsewhere.
keyvalue popmin(bheap *heap){
keyvalue min=heap->kv[0];
heap->pt[min.key]=-1;
heap->kv[0]=heap->kv[--(heap->n)];// move last entry to the root
heap->pt[heap->kv[0].key]=0;
bubble_down(heap);
return min;
}
//Build a heap holding (key,value)=(node id, degree) for every node,
//where v[i] is the degree of node i.
bheap* mkheap(unsigned n,unsigned *v){
bheap* heap=construct(n);
unsigned i;
for (i=0;i<n;i++){
keyvalue kv;
kv.key=i;
kv.value=v[i];
insert(heap,kv);
}
return heap;
}
// Release a heap created by construct()/mkheap().
void freeheap(bheap *heap){
free(heap->kv);
free(heap->pt);
free(heap);
}
//computing degeneracy ordering and core value
// Builds a full (undirected) adjacency structure, then repeatedly pops the
// minimum-degree node from a binary heap, decrementing its neighbours'
// degrees. el->rank[v] records the peeling order: the i-th node popped
// (i starting at 1) receives rank n-i, so the first (lowest-degree) node
// gets the highest rank.
void ord_core(edgelist* el){
unsigned i,j,r=0,n=el->n,e=el->e;
keyvalue kv;
bheap *heap;
unsigned *d0=calloc(el->n,sizeof(unsigned));
unsigned *cd0=malloc((el->n+1)*sizeof(unsigned));
unsigned *adj0=malloc(2*el->e*sizeof(unsigned));
// undirected degrees: each edge counts at both endpoints
for (i=0;i<e;i++) {
d0[el->edges[i].s]++;
d0[el->edges[i].t]++;
}
// prefix sums; d0 is zeroed so it can be reused as a fill cursor below
cd0[0]=0;
for (i=1;i<n+1;i++) {
cd0[i]=cd0[i-1]+d0[i-1];
d0[i-1]=0;
}
// fill adjacency lists (each edge stored in both directions)
for (i=0;i<e;i++) {
adj0[ cd0[el->edges[i].s] + d0[ el->edges[i].s ]++ ]=el->edges[i].t;
adj0[ cd0[el->edges[i].t] + d0[ el->edges[i].t ]++ ]=el->edges[i].s;
}
heap=mkheap(n,d0);
el->rank=malloc(n*sizeof(unsigned));
// peel: pop min-degree node, assign its rank, relax its neighbours
for (i=0;i<n;i++){
kv=popmin(heap);
el->rank[kv.key]=n-(++r);
for (j=cd0[kv.key];j<cd0[kv.key+1];j++){
update(heap,adj0[j]);
}
}
freeheap(heap);
free(d0);
free(cd0);
free(adj0);
}
//////////////////////////
//Building the special graph
// Builds the rank-oriented directed graph (edges already point from higher
// to lower rank after relabel()), storing only out-neighbours in CSR form.
// g->core is the maximum out-degree, which bounds every subgraph size in
// the clique recursion.
graph* mkgraph(edgelist *el){
unsigned i,max;
unsigned *d;
graph* g=malloc(sizeof(graph));
d=calloc(el->n,sizeof(unsigned));
// out-degree of each node (source endpoint only)
for (i=0;i<el->e;i++) {
d[el->edges[i].s]++;
}
g->cd=malloc((el->n+1)*sizeof(unsigned));
g->cd[0]=0;
max=0;
// prefix sums; d is zeroed so it can be reused as a fill cursor below
for (i=1;i<el->n+1;i++) {
g->cd[i]=g->cd[i-1]+d[i-1];
max=(max>d[i-1])?max:d[i-1];
d[i-1]=0;
}
printf("core value (max truncated degree) = %u\n",max);
g->adj=malloc(el->e*sizeof(unsigned));
// fill the truncated (out-neighbour) adjacency lists
for (i=0;i<el->e;i++) {
g->adj[ g->cd[el->edges[i].s] + d[ el->edges[i].s ]++ ]=el->edges[i].t;
}
free(d);
g->core=max;
g->n=el->n;
return g;
}
// Allocate one thread's scratch subgraph: per-level node lists and degree
// arrays for levels 2..k-1, plus a core x core truncated adjacency matrix
// (row stride = g->core).
subgraph* allocsub(graph *g,unsigned char k){
subgraph* sg=malloc(sizeof(subgraph));
unsigned lvl;
sg->n=calloc(k,sizeof(unsigned));
sg->d=malloc(k*sizeof(unsigned*));
sg->nodes=malloc(k*sizeof(unsigned*));
for (lvl=2;lvl<k;lvl++){
sg->d[lvl]=malloc(g->core*sizeof(unsigned));
sg->nodes[lvl]=malloc(g->core*sizeof(unsigned));
}
sg->lab=calloc(g->core,sizeof(unsigned char));
sg->adj=malloc(g->core*g->core*sizeof(unsigned));
sg->core=g->core;
return sg;
}
// Build, into sg (level k-1), the subgraph induced by u's out-neighbours:
// neighbours are renumbered 0..deg(u)-1 and sg->adj holds their mutual
// adjacencies. 'new' maps global id -> local id ((unsigned)-1 = absent),
// 'old' maps local id -> global id.
// NOTE(review): the threadprivate scratch arrays are allocated once per
// thread and never freed (deliberate reuse across calls, per the
// "to improve" comment) — confirm this leak-at-exit is acceptable.
void mksub(graph* g,unsigned u,subgraph* sg,unsigned char k){
unsigned i,j,l,v,w;
static unsigned *old=NULL,*new=NULL;//to improve
#pragma omp threadprivate(new,old)
if (old==NULL){
// first call on this thread: allocate and clear the id maps
new=malloc(g->n*sizeof(unsigned));
old=malloc(g->core*sizeof(unsigned));
for (i=0;i<g->n;i++){
new[i]=-1;
}
}
// clear labels left over from the previous call
for (i=0;i<sg->n[k-1];i++){
sg->lab[i]=0;
}
j=0;
// register each out-neighbour of u under a fresh local id j
for (i=g->cd[u];i<g->cd[u+1];i++){
v=g->adj[i];
new[v]=j;
old[j]=v;
sg->lab[j]=k-1;
sg->nodes[k-1][j]=j;
sg->d[k-1][j]=0;//new degrees
j++;
}
sg->n[k-1]=j;
for (i=0;i<sg->n[k-1];i++){//reodering adjacency list and computing new degrees
v=old[i];
for (l=g->cd[v];l<g->cd[v+1];l++){
w=g->adj[l];
j=new[w];
if (j!=-1){// keep only neighbours that are also neighbours of u
sg->adj[sg->core*i+sg->d[k-1][i]++]=j;
}
}
}
// reset the global->local map for the next call
for (i=g->cd[u];i<g->cd[u+1];i++){
new[g->adj[i]]=-1;
}
}
// Recursively enumerate cliques inside sg. At level l, each node labelled l
// is a candidate; its neighbourhood (restricted to label l) becomes level
// l-1. At l==2 every remaining edge completes one k-clique, counted in *n.
void kclique_thread(unsigned char l, subgraph *sg, unsigned long long *n) {
unsigned i,j,k,end,u,v,w;
if(l==2){
for(i=0; i<sg->n[2]; i++){//list all edges
u=sg->nodes[2][i];
end=u*sg->core+sg->d[2][u];
for (j=u*sg->core;j<end;j++) {
(*n)++;//listing here!!! // NOTE THAT WE COULD DO (*n)+=g->d[2][u] to be much faster (for counting only); !!!!!!!!!!!!!!!!!!
}
}
return;
}
for(i=0; i<sg->n[l]; i++){
u=sg->nodes[l][i];
//printf("%u %u\n",i,u);
sg->n[l-1]=0;
end=u*sg->core+sg->d[l][u];
for (j=u*sg->core;j<end;j++){//relabeling nodes and forming U'.
v=sg->adj[j];
if (sg->lab[v]==l){
sg->lab[v]=l-1;// v survives into level l-1
sg->nodes[l-1][sg->n[l-1]++]=v;
sg->d[l-1][v]=0;//new degrees
}
}
for (j=0;j<sg->n[l-1];j++){//reodering adjacency list and computing new degrees
v=sg->nodes[l-1][j];
end=sg->core*v+sg->d[l][v];
for (k=sg->core*v;k<end;k++){
w=sg->adj[k];
if (sg->lab[w]==l-1){
sg->d[l-1][v]++;
}
else{
// partition in place: move non-surviving neighbours to the tail
// of v's row so the first d[l-1][v] entries are level-(l-1) nodes
sg->adj[k--]=sg->adj[--end];
sg->adj[end]=w;
}
}
}
kclique_thread(l-1, sg, n);
for (j=0;j<sg->n[l-1];j++){//restoring labels
v=sg->nodes[l-1][j];
sg->lab[v]=l;
}
}
}
// Count all k-cliques of g. Each OpenMP thread allocates its own scratch
// subgraph, the per-node work is distributed dynamically, and per-thread
// counts are combined via reduction.
unsigned long long kclique_main(unsigned char k, graph *g) {
unsigned u;
unsigned long long n=0;
subgraph *sg;
#pragma omp parallel private(sg,u) reduction(+:n)
{
sg=allocsub(g,k);
#pragma omp for schedule(dynamic, 1) nowait
for(u=0; u<g->n; u++){
mksub(g,u,sg,k);
kclique_thread(k-1, sg, &n);
}
free_subgraph(sg,k);// fix: the per-thread scratch space used to leak
}
return n;
}
// Entry point. Usage: ./kClistNodeParallel p k edgelist.txt
//   p: number of OpenMP threads; k: clique size; edgelist.txt: one
//   "src dst" edge per line.
int main(int argc,char** argv){
edgelist* el;
graph* g;
unsigned char k;
unsigned long long n;
time_t t0,t1,t2;
if (argc < 4){// fix: the previous version read argv[1..3] unchecked
fprintf(stderr,"Usage: %s p k edgelist.txt\n",argv[0]);
return 1;
}
k=atoi(argv[2]);
omp_set_num_threads(atoi(argv[1]));
t1=time(NULL);
t0=t1;
printf("Reading edgelist from file %s\n",argv[3]);
el=readedgelist(argv[3]);
if (el==NULL){// readedgelist may fail on a missing file or OOM
fprintf(stderr,"Could not read edgelist from %s\n",argv[3]);
return 1;
}
printf("Number of nodes = %u\n",el->n);
printf("Number of edges = %u\n",el->e);
t2=time(NULL);
printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
t1=t2;
printf("Building the graph structure\n");
ord_core(el);
relabel(el);
g=mkgraph(el);
printf("Number of nodes (degree > 0) = %u\n",g->n);
free_edgelist(el);
t2=time(NULL);
printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
t1=t2;
printf("Iterate over all cliques\n");
n=kclique_main(k, g);
printf("Number of %u-cliques: %llu\n",k,n);
t2=time(NULL);
printf("- Time = %ldh%ldm%lds\n",(t2-t1)/3600,((t2-t1)%3600)/60,((t2-t1)%60));
t1=t2;
free_graph(g);
printf("- Overall time = %ldh%ldm%lds\n",(t2-t0)/3600,((t2-t0)%3600)/60,((t2-t0)%60));
return 0;
}
|
hermm_c_dia_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
/*
 * Hermitian sparse-times-dense multiply for a DIA-format matrix holding
 * the LOWER triangle (diagonals with distance d <= 0): computes
 * y := beta*y + alpha*A*x over 'columns' right-hand sides, with row-major
 * dense operands (leading dimensions ldx/ldy).
 */
alphasparse_status_t ONAME(const ALPHA_Complex alpha, const ALPHA_SPMAT_DIA *mat, const ALPHA_Complex *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Complex beta, ALPHA_Complex *y, const ALPHA_INT ldy)
{
ALPHA_INT num_threads = alpha_get_thread_num();
/* Phase 1: y *= beta (rows split across threads) */
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
for (ALPHA_INT r = 0; r < mat->rows; r++)
for(ALPHA_INT c = 0; c < columns; c++)
alpha_mul(y[index2(r,c,ldy)],y[index2(r,c,ldy)],beta);
/* Phase 2: accumulate alpha*A*x. Threads partition the RHS columns
 * [bcl,bch), so the two scattered row updates below never collide
 * across threads. */
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT bcl = cross_block_low(tid,num_threads,columns);
ALPHA_INT bch = cross_block_high(tid,num_threads,columns);
for(ALPHA_INT di = 0; di < mat->ndiag;++di){
ALPHA_INT d = mat->distance[di];
if(d < 0){
/* strictly lower diagonal: apply each stored entry a(ar,ac) and
 * its Hermitian mirror at (ac,ar). alpha_mul_2c presumably forms
 * alpha*conj(a) for the mirror — TODO confirm against the macro. */
ALPHA_INT ars = alpha_max(0,-d);
ALPHA_INT acs = alpha_max(0,d);
ALPHA_INT an = alpha_min(mat->rows - ars,mat->cols - acs);
for(ALPHA_INT i = 0; i < an; ++i){
ALPHA_INT ar = ars + i;
ALPHA_INT ac = acs + i;
ALPHA_Complex val,val_c;
alpha_mul(val,mat->values[index2(di,ar,mat->lval)],alpha);
alpha_mul_2c(val_c,mat->values[index2(di,ar,mat->lval)],alpha);
for(ALPHA_INT bc = bcl;bc < bch;++bc){
alpha_madde(y[index2(ar,bc,ldy)],val,x[index2(ac,bc,ldx)]);
alpha_madde(y[index2(ac,bc,ldy)],val_c,x[index2(ar,bc,ldx)]);
}
}
}
if(d == 0){
/* main diagonal: applied once, no mirror */
for(ALPHA_INT r = 0; r < mat->rows; ++r){
ALPHA_Number val; /* NOTE(review): ALPHA_Number here vs ALPHA_Complex
 * elsewhere — presumably the same typedef; confirm */
alpha_mul(val,mat->values[index2(di,r,mat->lval)],alpha);
for(ALPHA_INT bc = bcl;bc < bch;++bc){
alpha_madde(y[index2(r,bc,ldy)],val,x[index2(r,bc,ldx)]);
}
}
}
}
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
nanort.h | //
// NanoRT, single header only modern ray tracing kernel.
//
/*
The MIT License (MIT)
Copyright (c) 2015 - 2016 Light Transport Entertainment, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#ifndef NANORT_H_
#define NANORT_H_
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <queue>
#include <string>
#include <vector>
namespace nanort {
#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif
// Parallelized BVH build is not yet fully tested,
// thus turn off if you face a problem when building BVH.
#define NANORT_ENABLE_PARALLEL_BUILD (1)
// ----------------------------------------------------------------------------
// Small vector class useful for multi-threaded environment.
//
// stack_container.h
//
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This allocator can be used with STL containers to provide a stack buffer
// from which to allocate memory and overflows onto the heap. This stack buffer
// would be allocated on the stack and allows us to avoid heap operations in
// some situations.
//
// STL likes to make copies of allocators, so the allocator itself can't hold
// the data. Instead, we make the creator responsible for creating a
// StackAllocator::Source which contains the data. Copying the allocator
// merely copies the pointer to this shared source, so all allocators created
// based on our allocator will share the same stack buffer.
//
// This stack buffer implementation is very simple. The first allocation that
// fits in the stack buffer will use the stack buffer. Any subsequent
// allocations will not use the stack buffer, even if there is unused room.
// This makes it appropriate for array-like containers, but the caller should
// be sure to reserve() in the container up to the stack buffer size. Otherwise
// the container will allocate a small array which will "use up" the stack
// buffer.
// Allocator that hands out a fixed-size stack buffer (held in a shared
// Source) for the FIRST allocation that fits, and falls back to
// std::allocator for everything else.
// NOTE(review): relies on std::allocator<T>::pointer / size_type and
// allocate(n, hint), which were deprecated in C++17 and removed in C++20 —
// confirm the project's -std level before upgrading.
template <typename T, size_t stack_capacity>
class StackAllocator : public std::allocator<T> {
public:
typedef typename std::allocator<T>::pointer pointer;
typedef typename std::allocator<T>::size_type size_type;
// Backing store for the allocator. The container owner is responsible for
// maintaining this for as long as any containers using this allocator are
// live.
struct Source {
Source() : used_stack_buffer_(false) {}
// Casts the buffer in its right type.
T *stack_buffer() { return reinterpret_cast<T *>(stack_buffer_); }
const T *stack_buffer() const {
return reinterpret_cast<const T *>(stack_buffer_);
}
//
// IMPORTANT: Take care to ensure that stack_buffer_ is aligned
// since it is used to mimic an array of T.
// Be careful while declaring any unaligned types (like bool)
// before stack_buffer_.
//
// The buffer itself. It is not of type T because we don't want the
// constructors and destructors to be automatically called. Define a POD
// buffer of the right size instead.
char stack_buffer_[sizeof(T[stack_capacity])];
// Set when the stack buffer is used for an allocation. We do not track
// how much of the buffer is used, only that somebody is using it.
bool used_stack_buffer_;
};
// Used by containers when they want to refer to an allocator of type U.
template <typename U>
struct rebind {
typedef StackAllocator<U, stack_capacity> other;
};
// For the straight up copy c-tor, we can share storage.
StackAllocator(const StackAllocator<T, stack_capacity> &rhs)
: source_(rhs.source_) {}
// ISO C++ requires the following constructor to be defined,
// and std::vector in VC++2008SP1 Release fails with an error
// in the class _Container_base_aux_alloc_real (from <xutility>)
// if the constructor does not exist.
// For this constructor, we cannot share storage; there's
// no guarantee that the Source buffer of Ts is large enough
// for Us.
// TODO(Google): If we were fancy pants, perhaps we could share storage
// iff sizeof(T) == sizeof(U).
template <typename U, size_t other_capacity>
StackAllocator(const StackAllocator<U, other_capacity> &other)
: source_(NULL) {
(void)other;
}
explicit StackAllocator(Source *source) : source_(source) {}
// Actually do the allocation. Use the stack buffer if nobody has used it yet
// and the size requested fits. Otherwise, fall through to the standard
// allocator.
pointer allocate(size_type n, void *hint = 0) {
if (source_ != NULL && !source_->used_stack_buffer_ &&
n <= stack_capacity) {
source_->used_stack_buffer_ = true;
return source_->stack_buffer();
} else {
return std::allocator<T>::allocate(n, hint);
}
}
// Free: when trying to free the stack buffer, just mark it as free. For
// non-stack-buffer pointers, just fall though to the standard allocator.
void deallocate(pointer p, size_type n) {
if (source_ != NULL && p == source_->stack_buffer())
source_->used_stack_buffer_ = false;
else
std::allocator<T>::deallocate(p, n);
}
private:
Source *source_; // shared backing store; NULL means "heap only"
};
// A wrapper around STL containers that maintains a stack-sized buffer that the
// initial capacity of the vector is based on. Growing the container beyond the
// stack capacity will transparently overflow onto the heap. The container must
// support reserve().
//
// WATCH OUT: the ContainerType MUST use the proper StackAllocator for this
// type. This object is really intended to be used only internally. You'll want
// to use the wrappers below for different types.
// Wraps an STL container so its first stack_capacity elements live in an
// in-object buffer (via StackAllocator); growth beyond that spills to the
// heap transparently.
template <typename TContainerType, int stack_capacity>
class StackContainer {
public:
typedef TContainerType ContainerType;
typedef typename ContainerType::value_type ContainedType;
typedef StackAllocator<ContainedType, stack_capacity> Allocator;
// Allocator must be constructed before the container!
StackContainer() : allocator_(&stack_data_), container_(allocator_) {
// Make the container use the stack allocation by reserving our buffer size
// before doing anything else.
container_.reserve(stack_capacity);
}
// Getters for the actual container.
//
// Danger: any copies of this made using the copy constructor must have
// shorter lifetimes than the source. The copy will share the same allocator
// and therefore the same stack buffer as the original. Use std::copy to
// copy into a "real" container for longer-lived objects.
ContainerType &container() { return container_; }
const ContainerType &container() const { return container_; }
// Support operator-> to get to the container. This allows nicer syntax like:
// StackContainer<...> foo;
// std::sort(foo->begin(), foo->end());
ContainerType *operator->() { return &container_; }
const ContainerType *operator->() const { return &container_; }
#ifdef UNIT_TEST
// Retrieves the stack source so that that unit tests can verify that the
// buffer is being used properly.
const typename Allocator::Source &stack_data() const { return stack_data_; }
#endif
protected:
typename Allocator::Source stack_data_;
// NOTE(review): presumably padding to align allocator_/container_ after
// the trailing bool inside stack_data_ — confirm before changing layout.
unsigned char pad_[7];
Allocator allocator_;
ContainerType container_;
// DISALLOW_EVIL_CONSTRUCTORS(StackContainer);
StackContainer(const StackContainer &);
void operator=(const StackContainer &);
};
// StackVector
//
// Example:
// StackVector<int, 16> foo;
// foo->push_back(22); // we have overloaded operator->
// foo[0] = 10; // as well as operator[]
// std::vector specialization of StackContainer; adds value-semantics copies
// (each copy gets its OWN stack buffer) and direct indexing.
template <typename T, size_t stack_capacity>
class StackVector
: public StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity> {
public:
StackVector()
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {}
// We need to put this in STL containers sometimes, which requires a copy
// constructor. We can't call the regular copy constructor because that will
// take the stack buffer from the original. Here, we create an empty object
// and make a stack buffer of its own.
StackVector(const StackVector<T, stack_capacity> &other)
: StackContainer<std::vector<T, StackAllocator<T, stack_capacity> >,
stack_capacity>() {
this->container().assign(other->begin(), other->end());
}
StackVector<T, stack_capacity> &operator=(
const StackVector<T, stack_capacity> &other) {
this->container().assign(other->begin(), other->end());
return *this;
}
// Vectors are commonly indexed, which isn't very convenient even with
// operator-> (using "->at()" does exception stuff we don't want).
T &operator[](size_t i) { return this->container().operator[](i); }
const T &operator[](size_t i) const {
return this->container().operator[](i);
}
};
// ----------------------------------------------------------------------------
// Minimal 3-component vector used throughout the BVH code.
template <typename T = float>
class real3 {
 public:
  real3() {}
  // Broadcast constructor: all three components set to s.
  real3(T s) { v[0] = v[1] = v[2] = s; }
  real3(T px, T py, T pz) {
    v[0] = px;
    v[1] = py;
    v[2] = pz;
  }
  explicit real3(const T *p) {
    v[0] = p[0];
    v[1] = p[1];
    v[2] = p[2];
  }
  inline T x() const { return v[0]; }
  inline T y() const { return v[1]; }
  inline T z() const { return v[2]; }
  real3 operator*(T f) const { return real3(v[0] * f, v[1] * f, v[2] * f); }
  real3 operator-(const real3 &o) const {
    return real3(v[0] - o.v[0], v[1] - o.v[1], v[2] - o.v[2]);
  }
  // Component-wise product.
  real3 operator*(const real3 &o) const {
    return real3(v[0] * o.v[0], v[1] * o.v[1], v[2] * o.v[2]);
  }
  real3 operator+(const real3 &o) const {
    return real3(v[0] + o.v[0], v[1] + o.v[1], v[2] + o.v[2]);
  }
  real3 &operator+=(const real3 &o) {
    v[0] += o.v[0];
    v[1] += o.v[1];
    v[2] += o.v[2];
    return (*this);
  }
  // Component-wise division.
  real3 operator/(const real3 &o) const {
    return real3(v[0] / o.v[0], v[1] / o.v[1], v[2] / o.v[2]);
  }
  real3 operator-() const { return real3(-v[0], -v[1], -v[2]); }
  T operator[](int i) const { return v[i]; }
  T &operator[](int i) { return v[i]; }
  T v[3];
  // T pad; // for alignment(when T = float)
};
// Scalar * vector (mirror of real3::operator*(T)).
template <typename T>
inline real3<T> operator*(T f, const real3<T> &v) {
return real3<T>(v.x() * f, v.y() * f, v.z() * f);
}
// Component-wise negation.
template <typename T>
inline real3<T> vneg(const real3<T> &rhs) {
return real3<T>(-rhs.x(), -rhs.y(), -rhs.z());
}
// Euclidean length.
template <typename T>
inline T vlength(const real3<T> &rhs) {
return std::sqrt(rhs.x() * rhs.x() + rhs.y() * rhs.y() + rhs.z() * rhs.z());
}
// Unit-length copy; returns the input unchanged when the length is within
// 1e-6 of zero (avoids division by ~0).
template <typename T>
inline real3<T> vnormalize(const real3<T> &rhs) {
real3<T> v = rhs;
T len = vlength(rhs);
if (std::fabs(len) > static_cast<T>(1.0e-6)) {
T inv_len = static_cast<T>(1.0) / len;
v.v[0] *= inv_len;
v.v[1] *= inv_len;
v.v[2] *= inv_len;
}
return v;
}
// Cross product a x b.
template <typename T>
inline real3<T> vcross(real3<T> a, real3<T> b) {
real3<T> c;
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
return c;
}
// Dot product.
template <typename T>
inline T vdot(real3<T> a, real3<T> b) {
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
// Address of the idx-th vertex in an array with an arbitrary byte stride
// (supports interleaved vertex buffers).
template <typename real>
inline const real *get_vertex_addr(const real *p, const size_t idx,
const size_t stride_bytes) {
return reinterpret_cast<const real *>(
reinterpret_cast<const unsigned char *>(p) + idx * stride_bytes);
}
// Ray description. Defaults: origin (0,0,0), direction (0,0,-1),
// t range [0, numeric max).
template <typename T = float>
class Ray {
public:
Ray() : min_t(static_cast<T>(0.0)), max_t(std::numeric_limits<T>::max()) {
org[0] = static_cast<T>(0.0);
org[1] = static_cast<T>(0.0);
org[2] = static_cast<T>(0.0);
dir[0] = static_cast<T>(0.0);
dir[1] = static_cast<T>(0.0);
dir[2] = static_cast<T>(-1.0);
}
T org[3]; // must set
T dir[3]; // must set
T min_t; // minimum ray hit distance.
T max_t; // maximum ray hit distance.
T inv_dir[3]; // filled internally
int dir_sign[3]; // filled internally
};
// One BVH node: an axis-aligned bounding box plus either two child indices
// (branch) or a primitive range (leaf).
template <typename T = float>
class BVHNode {
 public:
  BVHNode() {}
  BVHNode(const BVHNode &rhs) {
    for (int i = 0; i < 3; i++) {
      bmin[i] = rhs.bmin[i];
      bmax[i] = rhs.bmax[i];
    }
    flag = rhs.flag;
    axis = rhs.axis;
    data[0] = rhs.data[0];
    data[1] = rhs.data[1];
  }
  BVHNode &operator=(const BVHNode &rhs) {
    for (int i = 0; i < 3; i++) {
      bmin[i] = rhs.bmin[i];
      bmax[i] = rhs.bmax[i];
    }
    flag = rhs.flag;
    axis = rhs.axis;
    data[0] = rhs.data[0];
    data[1] = rhs.data[1];
    return (*this);
  }
  ~BVHNode() {}
  T bmin[3];  // AABB minimum corner
  T bmax[3];  // AABB maximum corner
  int flag; // 1 = leaf node, 0 = branch node
  int axis; // split axis (branch nodes)
  // leaf
  // data[0] = npoints
  // data[1] = index
  //
  // branch
  // data[0] = child[0]
  // data[1] = child[1]
  unsigned int data[2];
};
// Orders hit records by increasing ray parameter t (nearest hit first).
template <class H>
class IntersectComparator {
public:
bool operator()(const H &a, const H &b) const { return a.t < b.t; }
};
/// BVH build option.
template <typename T = float>
struct BVHBuildOptions {
T cost_t_aabb; // SAH cost of an AABB test relative to a primitive test
unsigned int min_leaf_primitives; // stop splitting below this many prims
unsigned int max_tree_depth; // hard recursion limit
unsigned int bin_size; // number of SAH bins per axis
unsigned int shallow_depth; // depth of the top tree built for parallelism
unsigned int min_primitives_for_parallel_build; // below this, build serially
// Cache bounding box computation.
// Requires more memory, but BVHbuild can be faster.
bool cache_bbox;
unsigned char pad[3]; // explicit padding after the bool
// Set default value: Taabb = 0.2
BVHBuildOptions()
: cost_t_aabb(0.2f),
min_leaf_primitives(4),
max_tree_depth(256),
bin_size(64),
shallow_depth(3),
min_primitives_for_parallel_build(1024 * 128),
cache_bbox(false) {}
};
/// BVH build statistics. Filled during Build(); all counters start at zero.
class BVHBuildStatistics {
public:
unsigned int max_tree_depth; // deepest node reached
unsigned int num_leaf_nodes;
unsigned int num_branch_nodes;
float build_secs; // wall-clock build time in seconds
// Set default value: Taabb = 0.2
BVHBuildStatistics()
: max_tree_depth(0),
num_leaf_nodes(0),
num_branch_nodes(0),
build_secs(0.0f) {}
};
/// BVH trace option. Defaults: all primitive IDs eligible, no back-face
/// culling.
class BVHTraceOptions {
public:
// Hit only for face IDs in indexRange.
// This feature is good to mimic something like glDrawArrays()
unsigned int prim_ids_range[2];
bool cull_back_face;
unsigned char pad[3]; ///< Padding(not used)
BVHTraceOptions() {
prim_ids_range[0] = 0;
prim_ids_range[1] = 0x7FFFFFFF; // Up to 2G face IDs.
cull_back_face = false;
}
};
// Axis-aligned bounding box. Starts inverted (bmin = +max, bmax = -max)
// so that merging any point immediately makes it valid.
template <typename T>
class BBox {
 public:
  real3<T> bmin;
  real3<T> bmax;
  BBox() {
    for (int i = 0; i < 3; i++) {
      bmin[i] = std::numeric_limits<T>::max();
      bmax[i] = -std::numeric_limits<T>::max();
    }
  }
};
// Records the [t_min, t_max] interval where a ray overlaps one BVH node;
// node_id of (unsigned)-1 marks "no hit".
template <typename T>
class NodeHit {
 public:
  NodeHit()
      : t_min(std::numeric_limits<T>::max()),
        t_max(-std::numeric_limits<T>::max()),
        node_id(static_cast<unsigned int>(-1)) {}
  NodeHit(const NodeHit<T> &rhs)
      : t_min(rhs.t_min), t_max(rhs.t_max), node_id(rhs.node_id) {}
  NodeHit &operator=(const NodeHit<T> &rhs) {
    t_min = rhs.t_min;
    t_max = rhs.t_max;
    node_id = rhs.node_id;
    return (*this);
  }
  ~NodeHit() {}
  T t_min;
  T t_max;
  unsigned int node_id;
};
// Orders node hits by increasing entry distance t_min (nearest node first).
template <typename T>
class NodeHitComparator {
public:
inline bool operator()(const NodeHit<T> &a, const NodeHit<T> &b) {
return a.t_min < b.t_min;
}
};
// Bounding Volume Hierarchy acceleration structure over a user-supplied
// primitive set: P computes per-primitive bounding boxes (and intersection
// via the I parameter of Traverse), Pred is the SAH split predicate.
template <typename T>
class BVHAccel {
public:
BVHAccel() : pad0_(0) { (void)pad0_; }
~BVHAccel() {}
///
/// Build BVH for input primitives.
///
template <class P, class Pred>
bool Build(const unsigned int num_primitives, const P &p, const Pred &pred,
const BVHBuildOptions<T> &options = BVHBuildOptions<T>());
///
/// Get statistics of built BVH tree. Valid after Build()
///
BVHBuildStatistics GetStatistics() const { return stats_; }
///
/// Dump built BVH to the file.
///
bool Dump(const char *filename);
///
/// Load BVH binary
///
bool Load(const char *filename);
void Debug();
///
/// Traverse into BVH along ray and find closest hit point & primitive if
/// found
///
template <class I, class H>
bool Traverse(const Ray<T> &ray, const I &intersector, H *isect,
const BVHTraceOptions &options = BVHTraceOptions()) const;
#if 0
/// Multi-hit ray traversal
/// Returns `max_intersections` frontmost intersections
template<class I, class H, class Comp>
bool MultiHitTraverse(const Ray<T> &ray,
int max_intersections,
const I &intersector,
StackVector<H, 128> *isects,
const BVHTraceOptions &options = BVHTraceOptions()) const;
#endif
///
/// List up nodes which intersects along the ray.
/// This function is useful for two-level BVH traversal.
///
template <class I>
bool ListNodeIntersections(const Ray<T> &ray, int max_intersections,
const I &intersector,
StackVector<NodeHit<T>, 128> *hits) const;
const std::vector<BVHNode<T> > &GetNodes() const { return nodes_; }
const std::vector<unsigned int> &GetIndices() const { return indices_; }
///
/// Returns bounding box of built BVH.
///
void BoundingBox(T bmin[3], T bmax[3]) const {
// Empty tree: report an inverted (invalid) box; otherwise the root AABB.
if (nodes_.empty()) {
bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();
} else {
bmin[0] = nodes_[0].bmin[0];
bmin[1] = nodes_[0].bmin[1];
bmin[2] = nodes_[0].bmin[2];
bmax[0] = nodes_[0].bmax[0];
bmax[1] = nodes_[0].bmax[1];
bmax[2] = nodes_[0].bmax[2];
}
}
bool IsValid() const { return nodes_.size() > 0; }
private:
#if NANORT_ENABLE_PARALLEL_BUILD
typedef struct {
unsigned int left_idx;
unsigned int right_idx;
unsigned int offset;
} ShallowNodeInfo;
// Used only during BVH construction
std::vector<ShallowNodeInfo> shallow_node_infos_;
/// Builds shallow BVH tree recursively.
template <class P, class Pred>
unsigned int BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx, unsigned int right_idx,
unsigned int depth,
unsigned int max_shallow_depth, const P &p,
const Pred &pred);
#endif
/// Builds BVH tree recursively.
template <class P, class Pred>
unsigned int BuildTree(BVHBuildStatistics *out_stat,
std::vector<BVHNode<T> > *out_nodes,
unsigned int left_idx, unsigned int right_idx,
unsigned int depth, const P &p, const Pred &pred);
template <class I>
bool TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
const I &intersector) const;
template <class I>
bool TestLeafNodeIntersections(
const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
const I &intersector,
std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
NodeHitComparator<T> > *isect_pq) const;
#if 0
template<class I, class H, class Comp>
bool MultiHitTestLeafNode(std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
int max_intersections,
const BVHNode<T> &node, const Ray<T> &ray,
const I &intersector) const;
#endif
std::vector<BVHNode<T> > nodes_;
std::vector<unsigned int> indices_; // max 4G triangles.
std::vector<BBox<T> > bboxes_; // per-primitive boxes (when cache_bbox)
BVHBuildOptions<T> options_;
BVHBuildStatistics stats_;
unsigned int pad0_;
};
// Predefined SAH predicator for triangle.
template <typename T = float>
class TriangleSAHPred {
public:
TriangleSAHPred(
const T *vertices, const unsigned int *faces,
size_t vertex_stride_bytes) // e.g. 12 for sizeof(float) * XYZ
: axis_(0),
pos_(0.0f),
vertices_(vertices),
faces_(faces),
vertex_stride_bytes_(vertex_stride_bytes) {}
void Set(int axis, T pos) const {
axis_ = axis;
pos_ = pos;
}
bool operator()(unsigned int i) const {
int axis = axis_;
T pos = pos_;
unsigned int i0 = faces_[3 * i + 0];
unsigned int i1 = faces_[3 * i + 1];
unsigned int i2 = faces_[3 * i + 2];
real3<T> p0(get_vertex_addr<T>(vertices_, i0, vertex_stride_bytes_));
real3<T> p1(get_vertex_addr<T>(vertices_, i1, vertex_stride_bytes_));
real3<T> p2(get_vertex_addr<T>(vertices_, i2, vertex_stride_bytes_));
T center = p0[axis] + p1[axis] + p2[axis];
return (center < pos * static_cast<T>(3.0));
}
private:
mutable int axis_;
mutable T pos_;
const T *vertices_;
const unsigned int *faces_;
const size_t vertex_stride_bytes_;
};
// Predefined Triangle mesh geometry.
template <typename T = float>
class TriangleMesh {
 public:
  TriangleMesh(
      const T *vertices, const unsigned int *faces,
      const size_t vertex_stride_bytes)  // e.g. 12 for sizeof(float) * XYZ
      : vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  /// Compute bounding box for `prim_index`th triangle.
  /// This function is called for each primitive in BVH build.
  void BoundingBox(real3<T> *bmin, real3<T> *bmax,
                   unsigned int prim_index) const {
    // Seed the box with the triangle's first vertex...
    {
      const T *v = get_vertex_addr<T>(vertices_, faces_[3 * prim_index + 0],
                                      vertex_stride_bytes_);
      for (int k = 0; k < 3; k++) {
        (*bmin)[k] = v[k];
        (*bmax)[k] = v[k];
      }
    }
    // ...then grow it by the remaining two vertices.
    for (unsigned int i = 1; i < 3; i++) {
      const T *v = get_vertex_addr<T>(vertices_, faces_[3 * prim_index + i],
                                      vertex_stride_bytes_);
      for (int k = 0; k < 3; k++) {
        if ((*bmin)[k] > v[k]) (*bmin)[k] = v[k];
        if ((*bmax)[k] < v[k]) (*bmax)[k] = v[k];
      }
    }
  }

  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;
};
// Hit record filled by TriangleIntersector::PostTraversal().
template <typename T = float>
class TriangleIntersection {
 public:
  T u;  // barycentric coordinate u
  T v;  // barycentric coordinate v

  // Required member variables.
  T t;                   // hit distance along the ray
  unsigned int prim_id;  // index of the intersected triangle
};
// Watertight ray-triangle intersector. Per-ray state is set up in
// PrepareTraversal(); the nearest hit is tracked via Update()/GetT()
// and exported through PostTraversal().
template <typename T = float, class H = TriangleIntersection<T> >
class TriangleIntersector {
 public:
  TriangleIntersector(const T *vertices, const unsigned int *faces,
                      const size_t vertex_stride_bytes)  // e.g.
                                                         // vertex_stride_bytes
                                                         // = 12 = sizeof(float)
                                                         // * 3
      : vertices_(vertices),
        faces_(faces),
        vertex_stride_bytes_(vertex_stride_bytes) {}

  // For Watertight Ray/Triangle Intersection.
  // Per-ray shear constants (Sx, Sy, Sz) and axis permutation (kx, ky, kz).
  typedef struct {
    T Sx;
    T Sy;
    T Sz;
    int kx;
    int ky;
    int kz;
  } RayCoeff;

  /// Do ray intersection stuff for `prim_index` th primitive and return hit
  /// distance `t`,
  /// barycentric coordinates `u` and `v`.
  /// Returns true if there's intersection.
  bool Intersect(T *t_inout, const unsigned int prim_index) const {
    // Honor the primitive-id range restriction from BVHTraceOptions.
    if ((prim_index < trace_options_.prim_ids_range[0]) ||
        (prim_index >= trace_options_.prim_ids_range[1])) {
      return false;
    }

    const unsigned int f0 = faces_[3 * prim_index + 0];
    const unsigned int f1 = faces_[3 * prim_index + 1];
    const unsigned int f2 = faces_[3 * prim_index + 2];

    const real3<T> p0(get_vertex_addr(vertices_, f0 + 0, vertex_stride_bytes_));
    const real3<T> p1(get_vertex_addr(vertices_, f1 + 0, vertex_stride_bytes_));
    const real3<T> p2(get_vertex_addr(vertices_, f2 + 0, vertex_stride_bytes_));

    // Translate vertices into ray-origin space.
    const real3<T> A = p0 - ray_org_;
    const real3<T> B = p1 - ray_org_;
    const real3<T> C = p2 - ray_org_;

    // Shear/scale so the ray direction becomes +Z; the scaled barycentric
    // coordinates are then 2D cross products.
    const T Ax = A[ray_coeff_.kx] - ray_coeff_.Sx * A[ray_coeff_.kz];
    const T Ay = A[ray_coeff_.ky] - ray_coeff_.Sy * A[ray_coeff_.kz];
    const T Bx = B[ray_coeff_.kx] - ray_coeff_.Sx * B[ray_coeff_.kz];
    const T By = B[ray_coeff_.ky] - ray_coeff_.Sy * B[ray_coeff_.kz];
    const T Cx = C[ray_coeff_.kx] - ray_coeff_.Sx * C[ray_coeff_.kz];
    const T Cy = C[ray_coeff_.ky] - ray_coeff_.Sy * C[ray_coeff_.kz];

    T U = Cx * By - Cy * Bx;
    T V = Ax * Cy - Ay * Cx;
    T W = Bx * Ay - By * Ax;

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#endif
    // Fall back to test against edges using double precision.
    if (U == static_cast<T>(0.0) || V == static_cast<T>(0.0) || W == static_cast<T>(0.0)) {
      double CxBy = static_cast<double>(Cx) * static_cast<double>(By);
      double CyBx = static_cast<double>(Cy) * static_cast<double>(Bx);
      U = static_cast<T>(CxBy - CyBx);
      double AxCy = static_cast<double>(Ax) * static_cast<double>(Cy);
      double AyCx = static_cast<double>(Ay) * static_cast<double>(Cx);
      V = static_cast<T>(AxCy - AyCx);
      double BxAy = static_cast<double>(Bx) * static_cast<double>(Ay);
      double ByAx = static_cast<double>(By) * static_cast<double>(Ax);
      W = static_cast<T>(BxAy - ByAx);
    }

    // U, V, W must all share a sign (or be zero) for the ray to pass
    // inside the triangle.
    if (trace_options_.cull_back_face) {
      if (U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) return false;
    } else {
      if ((U < static_cast<T>(0.0) || V < static_cast<T>(0.0) || W < static_cast<T>(0.0)) && (U > static_cast<T>(0.0) || V > static_cast<T>(0.0) || W > static_cast<T>(0.0))) {
        return false;
      }
    }

    T det = U + V + W;
    if (det == static_cast<T>(0.0)) return false;  // degenerate or parallel
#ifdef __clang__
#pragma clang diagnostic pop
#endif

    // Scaled hit distance in sheared space.
    const T Az = ray_coeff_.Sz * A[ray_coeff_.kz];
    const T Bz = ray_coeff_.Sz * B[ray_coeff_.kz];
    const T Cz = ray_coeff_.Sz * C[ray_coeff_.kz];
    const T D = U * Az + V * Bz + W * Cz;

    const T rcpDet = static_cast<T>(1.0) / det;
    T tt = D * rcpDet;

    // Reject hits farther than the current nearest, or closer than t_min.
    if (tt > (*t_inout)) {
      return false;
    }
    if (tt < t_min_) {
      return false;
    }

    (*t_inout) = tt;
    // Use Moeller-Trumbore style barycentric coord.
    // U + V + W = 1.0 and interp(p) = U * p0 + V * p1 + W * p2
    // We want interp(p) = (1 - u - v) * p0 + u * v1 + v * p2;
    // => u = V, v = W.
    u_ = V * rcpDet;
    v_ = W * rcpDet;

    return true;
  }

  /// Returns the nearest hit distance.
  T GetT() const { return t_; }

  /// Update is called when initializing intersection and nearest hit is found.
  void Update(T t, unsigned int prim_idx) const {
    t_ = t;
    prim_id_ = prim_idx;
  }

  /// Prepare BVH traversal(e.g. compute inverse ray direction)
  /// This function is called only once in BVH traversal.
  void PrepareTraversal(const Ray<T> &ray,
                        const BVHTraceOptions &trace_options) const {
    ray_org_[0] = ray.org[0];
    ray_org_[1] = ray.org[1];
    ray_org_[2] = ray.org[2];

    // Calculate dimension where the ray direction is maximal.
    ray_coeff_.kz = 0;
    T absDir = std::fabs(ray.dir[0]);
    if (absDir < std::fabs(ray.dir[1])) {
      ray_coeff_.kz = 1;
      absDir = std::fabs(ray.dir[1]);
    }
    if (absDir < std::fabs(ray.dir[2])) {
      ray_coeff_.kz = 2;
      absDir = std::fabs(ray.dir[2]);
    }

    // kx/ky are the remaining two axes, taken in cyclic order.
    ray_coeff_.kx = ray_coeff_.kz + 1;
    if (ray_coeff_.kx == 3) ray_coeff_.kx = 0;
    ray_coeff_.ky = ray_coeff_.kx + 1;
    if (ray_coeff_.ky == 3) ray_coeff_.ky = 0;

    // Swap kx and ky dimension to preserve winding direction of triangles.
    if (ray.dir[ray_coeff_.kz] < 0.0f) std::swap(ray_coeff_.kx, ray_coeff_.ky);

    // Calculate shear constants.
    ray_coeff_.Sx = ray.dir[ray_coeff_.kx] / ray.dir[ray_coeff_.kz];
    ray_coeff_.Sy = ray.dir[ray_coeff_.ky] / ray.dir[ray_coeff_.kz];
    ray_coeff_.Sz = 1.0f / ray.dir[ray_coeff_.kz];

    trace_options_ = trace_options;

    t_min_ = ray.min_t;

    u_ = 0.0f;
    v_ = 0.0f;
  }

  /// Post BVH traversal stuff.
  /// Fill `isect` if there is a hit.
  void PostTraversal(const Ray<T> &ray, bool hit, H *isect) const {
    if (hit && isect) {
      (*isect).t = t_;
      (*isect).u = u_;
      (*isect).v = v_;
      (*isect).prim_id = prim_id_;
    }
    (void)ray;
  }

 private:
  const T *vertices_;
  const unsigned int *faces_;
  const size_t vertex_stride_bytes_;

  // Mutable per-traversal state: Prepare/Update/Intersect are const so the
  // BVH can hold a const reference to the intersector.
  mutable real3<T> ray_org_;
  mutable RayCoeff ray_coeff_;
  mutable BVHTraceOptions trace_options_;
  mutable T t_min_;

  mutable T t_;
  mutable T u_;
  mutable T v_;
  mutable unsigned int prim_id_;
  int _pad_;
};
//
// Robust BVH Ray Traversal : http://jcgt.org/published/0002/02/02/paper.pdf
//
// NaN-safe min and max function.
template <class T>
const T &safemin(const T &a, const T &b) {
return (a < b) ? a : b;
}
template <class T>
const T &safemax(const T &a, const T &b) {
return (a > b) ? a : b;
}
//
// SAH functions
//
struct BinBuffer {
  /// Allocates 2 (min/max) * 3 (xyz) * `size` counters, zero-initialized.
  explicit BinBuffer(unsigned int size) {
    bin_size = size;
    bin.resize(2 * 3 * size);
    clear();
  }

  /// Resets every counter to zero.
  void clear() { std::fill(bin.begin(), bin.end(), static_cast<size_t>(0)); }

  std::vector<size_t> bin;  // (min, max) * xyz * binsize
  unsigned int bin_size;
  unsigned int pad0;
};
template <typename T>
inline T CalculateSurfaceArea(const real3<T> &min, const real3<T> &max) {
real3<T> box = max - min;
return static_cast<T>(2.0) *
(box[0] * box[1] + box[1] * box[2] + box[2] * box[0]);
}
// Bounding box of the `index`th triangle, assuming tightly packed
// (stride == 3 * sizeof(T)) vertex data.
template <typename T>
inline void GetBoundingBoxOfTriangle(real3<T> *bmin, real3<T> *bmax,
                                     const T *vertices,
                                     const unsigned int *faces,
                                     unsigned int index) {
  const real3<T> v0(&vertices[3 * faces[3 * index + 0]]);
  const real3<T> v1(&vertices[3 * faces[3 * index + 1]]);
  const real3<T> v2(&vertices[3 * faces[3 * index + 2]]);
  for (int k = 0; k < 3; k++) {
    // Same fold order as a sequential merge: ((v0 op v1) op v2).
    (*bmin)[k] = std::min(std::min(v0[k], v1[k]), v2[k]);
    (*bmax)[k] = std::max(std::max(v0[k], v1[k]), v2[k]);
  }
}
// Scatters primitives in indices[left_idx, right_idx) into the SAH bins:
// for each axis, increments the bin containing the primitive's bbox min
// (first half of `bins->bin`) and the bin containing its bbox max
// (second half).
template <typename T, class P>
inline void ContributeBinBuffer(BinBuffer *bins,  // [out]
                                const real3<T> &scene_min,
                                const real3<T> &scene_max,
                                unsigned int *indices, unsigned int left_idx,
                                unsigned int right_idx, const P &p) {
  T bin_size = static_cast<T>(bins->bin_size);

  // Calculate extent
  real3<T> scene_size, scene_inv_size;
  scene_size = scene_max - scene_min;

  for (int i = 0; i < 3; ++i) {
    assert(scene_size[i] >= static_cast<T>(0.0));

    if (scene_size[i] > static_cast<T>(0.0)) {
      scene_inv_size[i] = bin_size / scene_size[i];
    } else {
      // Flat axis: map everything to bin 0.
      scene_inv_size[i] = static_cast<T>(0.0);
    }
  }

  // Clear bin data
  std::fill(bins->bin.begin(), bins->bin.end(), 0);
  // memset(&bins->bin[0], 0, sizeof(2 * 3 * bins->bin_size));

  size_t idx_bmin[3];
  size_t idx_bmax[3];

  for (size_t i = left_idx; i < right_idx; i++) {
    //
    // Quantize the position into [0, BIN_SIZE)
    //
    // q[i] = (int)(p[i] - scene_bmin) / scene_size
    //
    real3<T> bmin;
    real3<T> bmax;

    p.BoundingBox(&bmin, &bmax, indices[i]);
    // GetBoundingBoxOfTriangle(&bmin, &bmax, vertices, faces, indices[i]);

    real3<T> quantized_bmin = (bmin - scene_min) * scene_inv_size;
    real3<T> quantized_bmax = (bmax - scene_min) * scene_inv_size;

    // idx is now in [0, BIN_SIZE)
    for (int j = 0; j < 3; ++j) {
      int q0 = static_cast<int>(quantized_bmin[j]);
      if (q0 < 0) q0 = 0;
      int q1 = static_cast<int>(quantized_bmax[j]);
      if (q1 < 0) q1 = 0;

      idx_bmin[j] = static_cast<unsigned int>(q0);
      idx_bmax[j] = static_cast<unsigned int>(q1);

      // Clamp to the last bin. NOTE(review): these compare size_t against
      // the floating-point `bin_size`; the size_t operand is converted to
      // T for the comparison -- confirm this is intended.
      if (idx_bmin[j] >= bin_size)
        idx_bmin[j] = static_cast<unsigned int>(bin_size) - 1;
      if (idx_bmax[j] >= bin_size)
        idx_bmax[j] = static_cast<unsigned int>(bin_size) - 1;

      assert(idx_bmin[j] < bin_size);
      assert(idx_bmax[j] < bin_size);

      // Increment bin counter
      bins->bin[0 * (bins->bin_size * 3) +
                static_cast<size_t>(j) * bins->bin_size + idx_bmin[j]] += 1;
      bins->bin[1 * (bins->bin_size * 3) +
                static_cast<size_t>(j) * bins->bin_size + idx_bmax[j]] += 1;
    }
  }
}
// Surface Area Heuristic cost of a candidate split: traversal cost of the
// two child boxes plus the area-weighted intersection cost of the `ns1`
// left and `ns2` right primitives. `invS` is 1 / parent surface area.
template <typename T>
inline T SAH(size_t ns1, T leftArea, size_t ns2, T rightArea, T invS, T Taabb,
             T Ttri) {
  const T left_cost = (leftArea * invS) * static_cast<T>(ns1) * Ttri;
  const T right_cost = (rightArea * invS) * static_cast<T>(ns2) * Ttri;
  return static_cast<T>(2.0) * Taabb + left_cost + right_cost;
}
// From the filled bin counters, finds the bin boundary with the lowest SAH
// cost on each axis (written to cut_pos[0..2]) and selects the cheapest
// axis overall (written to *minCostAxis). Always returns true.
template <typename T>
inline bool FindCutFromBinBuffer(T *cut_pos,        // [out] xyz
                                 int *minCostAxis,  // [out]
                                 const BinBuffer *bins, const real3<T> &bmin,
                                 const real3<T> &bmax, size_t num_primitives,
                                 T costTaabb) {  // should be in [0.0, 1.0]
  const T kEPS = std::numeric_limits<T>::epsilon();  // * epsScale;

  size_t left, right;
  real3<T> bsize, bstep;
  real3<T> bminLeft, bmaxLeft;
  real3<T> bminRight, bmaxRight;
  T saLeft, saRight, saTotal;
  T pos;
  T minCost[3];

  // Triangle cost is the complement of the AABB traversal cost.
  T costTtri = static_cast<T>(1.0) - costTaabb;

  (*minCostAxis) = 0;

  bsize = bmax - bmin;
  bstep = bsize * (static_cast<T>(1.0) / bins->bin_size);
  saTotal = CalculateSurfaceArea(bmin, bmax);

  T invSaTotal = static_cast<T>(0.0);
  if (saTotal > kEPS) {
    invSaTotal = static_cast<T>(1.0) / saTotal;
  }

  for (int j = 0; j < 3; ++j) {
    //
    // Compute SAH cost for each candidate split plane (cell boundary) of
    // the bbox along axis j, excluding both extreme sides of the bbox.
    //
    //  i:   0    1    2    3
    //    +----+----+----+----+----+
    //    |    |    |    |    |    |
    //    +----+----+----+----+----+
    //
    T minCostPos = bmin[j] + static_cast<T>(1.0) * bstep[j];
    minCost[j] = std::numeric_limits<T>::max();

    left = 0;
    right = num_primitives;
    bminLeft = bminRight = bmin;
    bmaxLeft = bmaxRight = bmax;

    for (int i = 0; i < static_cast<int>(bins->bin_size) - 1; ++i) {
      // Running counts: `left` accumulates primitives whose bbox-min bin is
      // <= i; `right` sheds primitives whose bbox-max bin is <= i.
      left += bins->bin[0 * (3 * bins->bin_size) +
                        static_cast<size_t>(j) * bins->bin_size +
                        static_cast<size_t>(i)];
      right -= bins->bin[1 * (3 * bins->bin_size) +
                         static_cast<size_t>(j) * bins->bin_size +
                         static_cast<size_t>(i)];

      assert(left <= num_primitives);
      assert(right <= num_primitives);

      //
      // Split pos bmin + (i + 1) * (bsize / BIN_SIZE)
      // +1 for i since we want a position on right side of the cell.
      //
      pos = bmin[j] + (i + static_cast<T>(1.0)) * bstep[j];
      bmaxLeft[j] = pos;
      bminRight[j] = pos;

      saLeft = CalculateSurfaceArea(bminLeft, bmaxLeft);
      saRight = CalculateSurfaceArea(bminRight, bmaxRight);

      T cost =
          SAH(left, saLeft, right, saRight, invSaTotal, costTaabb, costTtri);
      if (cost < minCost[j]) {
        //
        // Update the min cost
        //
        minCost[j] = cost;
        minCostPos = pos;
        // minCostAxis = j;
      }
    }

    cut_pos[j] = minCostPos;
  }

  // cut_axis = minCostAxis;
  // cut_pos = minCostPos;

  // Find min cost axis
  T cost = minCost[0];
  (*minCostAxis) = 0;
  if (cost > minCost[1]) {
    (*minCostAxis) = 1;
    cost = minCost[1];
  }
  if (cost > minCost[2]) {
    (*minCostAxis) = 2;
    cost = minCost[2];
  }

  return true;
}
#ifdef _OPENMP
/// Parallel bounding box over indices[left_index, right_index).
/// Each thread folds primitive boxes into its firstprivate local copy,
/// then the locals are merged into *bmin/*bmax under a critical section.
template <typename T, class P>
void ComputeBoundingBoxOMP(real3<T> *bmin, real3<T> *bmax,
                           const unsigned int *indices, unsigned int left_index,
                           unsigned int right_index, const P &p) {
  // Seed with the first primitive's bounds.
  { p.BoundingBox(bmin, bmax, indices[left_index]); }

  T local_bmin[3] = {(*bmin)[0], (*bmin)[1], (*bmin)[2]};
  T local_bmax[3] = {(*bmax)[0], (*bmax)[1], (*bmax)[2]};

  unsigned int n = right_index - left_index;

#pragma omp parallel firstprivate(local_bmin, local_bmax) if (n > (1024 * 128))
  {
#pragma omp for
    for (int i = static_cast<int>(left_index);
         i < static_cast<int>(right_index); i++) {  // for each face
      unsigned int idx = indices[i];

      real3<T> bbox_min, bbox_max;
      p.BoundingBox(&bbox_min, &bbox_max, idx);

      // BUGFIX: accumulate into the thread-private copies. The previous
      // code wrote to the shared *bmin/*bmax here, racing between threads.
      for (int k = 0; k < 3; k++) {  // xyz
        if (local_bmin[k] > bbox_min[k]) local_bmin[k] = bbox_min[k];
        if (local_bmax[k] < bbox_max[k]) local_bmax[k] = bbox_max[k];
      }
    }

    // Merge the per-thread boxes into the shared result.
#pragma omp critical
    {
      for (int k = 0; k < 3; k++) {
        if ((*bmin)[k] > local_bmin[k]) (*bmin)[k] = local_bmin[k];
        if ((*bmax)[k] < local_bmax[k]) (*bmax)[k] = local_bmax[k];
      }
    }
  }
}
#endif
// Sequential bounding box over indices[left_index, right_index).
template <typename T, class P>
inline void ComputeBoundingBox(real3<T> *bmin, real3<T> *bmax,
                               const unsigned int *indices,
                               unsigned int left_index,
                               unsigned int right_index, const P &p) {
  // Seed with the first primitive, then merge in the rest.
  p.BoundingBox(bmin, bmax, indices[left_index]);

  for (unsigned int i = left_index + 1; i < right_index; i++) {
    real3<T> prim_min, prim_max;
    p.BoundingBox(&prim_min, &prim_max, indices[i]);
    for (int k = 0; k < 3; k++) {  // xyz
      if (prim_min[k] < (*bmin)[k]) (*bmin)[k] = prim_min[k];
      if (prim_max[k] > (*bmax)[k]) (*bmax)[k] = prim_max[k];
    }
  }
}
// Bounding box over cached per-primitive boxes for
// indices[left_index, right_index).
template <typename T>
inline void GetBoundingBox(real3<T> *bmin, real3<T> *bmax,
                           const std::vector<BBox<T> > &bboxes,
                           unsigned int *indices, unsigned int left_index,
                           unsigned int right_index) {
  T mn[3];
  T mx[3];

  {
    // Seed with the first primitive's cached box.
    const BBox<T> &seed = bboxes[indices[left_index]];
    for (int k = 0; k < 3; k++) {
      mn[k] = seed.bmin[k];
      mx[k] = seed.bmax[k];
    }
  }

  // Merge every cached box in the range (the seed is merged again,
  // which is harmless).
  for (unsigned int i = left_index; i < right_index; i++) {
    const BBox<T> &b = bboxes[indices[i]];
    for (int k = 0; k < 3; k++) {  // xyz
      if (mn[k] > b.bmin[k]) mn[k] = b.bmin[k];
      if (mx[k] < b.bmax[k]) mx[k] = b.bmax[k];
    }
  }

  for (int k = 0; k < 3; k++) {
    (*bmin)[k] = mn[k];
    (*bmax)[k] = mx[k];
  }
}
//
// --
//
#if NANORT_ENABLE_PARALLEL_BUILD
/// Builds only the top of the tree, down to `max_shallow_depth`. Deeper
/// subtrees are recorded in shallow_node_infos_ (with a dummy placeholder
/// node) so they can be built in parallel afterwards by Build().
/// Returns the index of the created node in `out_nodes`.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildShallowTree(std::vector<BVHNode<T> > *out_nodes,
                                           unsigned int left_idx,
                                           unsigned int right_idx,
                                           unsigned int depth,
                                           unsigned int max_shallow_depth,
                                           const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (stats_.max_tree_depth < depth) {
    stats_.max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
  ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
    BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    leaf.flag = 1;  // leaf
    leaf.data[0] = n;         // primitive count
    leaf.data[1] = left_idx;  // first index into indices_
    out_nodes->push_back(leaf);  // atomic update

    stats_.num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //
  if (depth >= max_shallow_depth) {
    // Delay to build tree
    ShallowNodeInfo info;
    info.left_idx = left_idx;
    info.right_idx = right_idx;
    info.offset = offset;
    shallow_node_infos_.push_back(info);

    // Add dummy node.
    BVHNode<T> node;
    node.axis = -1;
    node.flag = -1;
    out_nodes->push_back(node);

    return offset;
  } else {
    //
    // Compute SAH and find best split axis and position
    //
    int min_cut_axis = 0;
    T cut_pos[3] = {0.0, 0.0, 0.0};

    BinBuffer bins(options_.bin_size);
    ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                        p);
    FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                         options_.cost_t_aabb);

    // Try all 3 axis until good cut position available.
    unsigned int mid_idx = left_idx;
    int cut_axis = min_cut_axis;

    for (int axis_try = 0; axis_try < 3; axis_try++) {
      unsigned int *begin = &indices_[left_idx];
      unsigned int *end =
          &indices_[right_idx - 1] + 1;  // mimics end() iterator.
      unsigned int *mid = 0;

      // try min_cut_axis first.
      cut_axis = (min_cut_axis + axis_try) % 3;

      // @fixme { We want some thing like: std::partition(begin, end,
      // pred(cut_axis, cut_pos[cut_axis])); }
      pred.Set(cut_axis, cut_pos[cut_axis]);

      //
      // Split at (cut_axis, cut_pos)
      // indices_ will be modified.
      //
      mid = std::partition(begin, end, pred);

      mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
      if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
        // Can't split well.
        // Switch to object median(which may create unoptimized tree, but
        // stable)
        mid_idx = left_idx + (n >> 1);

        // Try another axis if there's axis to try.
      } else {
        // Found good cut. exit loop.
        break;
      }
    }

    BVHNode<T> node;
    node.axis = cut_axis;
    node.flag = 0;  // 0 = branch

    out_nodes->push_back(node);

    unsigned int left_child_index = 0;
    unsigned int right_child_index = 0;

    left_child_index = BuildShallowTree(out_nodes, left_idx, mid_idx, depth + 1,
                                        max_shallow_depth, p, pred);

    right_child_index = BuildShallowTree(out_nodes, mid_idx, right_idx,
                                         depth + 1, max_shallow_depth, p, pred);

    // Patch the branch node now that the child offsets are known.
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  stats_.num_branch_nodes++;

  return offset;
}
#endif
/// Recursively builds a subtree over indices_[left_idx, right_idx) using
/// binned-SAH splits (object-median fallback when no axis splits well).
/// Returns the index of the created node in `out_nodes`.
template <typename T>
template <class P, class Pred>
unsigned int BVHAccel<T>::BuildTree(BVHBuildStatistics *out_stat,
                                    std::vector<BVHNode<T> > *out_nodes,
                                    unsigned int left_idx,
                                    unsigned int right_idx, unsigned int depth,
                                    const P &p, const Pred &pred) {
  assert(left_idx <= right_idx);

  unsigned int offset = static_cast<unsigned int>(out_nodes->size());

  if (out_stat->max_tree_depth < depth) {
    out_stat->max_tree_depth = depth;
  }

  real3<T> bmin, bmax;
  // Use cached per-primitive boxes when Build() populated them.
  if (!bboxes_.empty()) {
    GetBoundingBox(&bmin, &bmax, bboxes_, &indices_.at(0), left_idx, right_idx);
  } else {
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), left_idx, right_idx, p);
  }

  unsigned int n = right_idx - left_idx;
  if ((n <= options_.min_leaf_primitives) ||
      (depth >= options_.max_tree_depth)) {
    // Create leaf node.
    BVHNode<T> leaf;

    leaf.bmin[0] = bmin[0];
    leaf.bmin[1] = bmin[1];
    leaf.bmin[2] = bmin[2];

    leaf.bmax[0] = bmax[0];
    leaf.bmax[1] = bmax[1];
    leaf.bmax[2] = bmax[2];

    assert(left_idx < std::numeric_limits<unsigned int>::max());

    leaf.flag = 1;  // leaf
    leaf.data[0] = n;         // primitive count
    leaf.data[1] = left_idx;  // first index into indices_
    out_nodes->push_back(leaf);  // atomic update

    out_stat->num_leaf_nodes++;

    return offset;
  }

  //
  // Create branch node.
  //

  //
  // Compute SAH and find best split axis and position
  //
  int min_cut_axis = 0;
  T cut_pos[3] = {0.0, 0.0, 0.0};

  BinBuffer bins(options_.bin_size);
  ContributeBinBuffer(&bins, bmin, bmax, &indices_.at(0), left_idx, right_idx,
                      p);
  FindCutFromBinBuffer(cut_pos, &min_cut_axis, &bins, bmin, bmax, n,
                       options_.cost_t_aabb);

  // Try all 3 axis until good cut position available.
  unsigned int mid_idx = left_idx;
  int cut_axis = min_cut_axis;

  for (int axis_try = 0; axis_try < 3; axis_try++) {
    unsigned int *begin = &indices_[left_idx];
    unsigned int *end = &indices_[right_idx - 1] + 1;  // mimics end() iterator.
    unsigned int *mid = 0;

    // try min_cut_axis first.
    cut_axis = (min_cut_axis + axis_try) % 3;

    pred.Set(cut_axis, cut_pos[cut_axis]);

    //
    // Split at (cut_axis, cut_pos)
    // indices_ will be modified.
    //
    mid = std::partition(begin, end, pred);

    mid_idx = left_idx + static_cast<unsigned int>((mid - begin));
    if ((mid_idx == left_idx) || (mid_idx == right_idx)) {
      // Can't split well.
      // Switch to object median(which may create unoptimized tree, but
      // stable)
      mid_idx = left_idx + (n >> 1);

      // Try another axis to find better cut.
    } else {
      // Found good cut. exit loop.
      break;
    }
  }

  BVHNode<T> node;
  node.axis = cut_axis;
  node.flag = 0;  // 0 = branch

  out_nodes->push_back(node);

  unsigned int left_child_index = 0;
  unsigned int right_child_index = 0;

  left_child_index =
      BuildTree(out_stat, out_nodes, left_idx, mid_idx, depth + 1, p, pred);

  right_child_index =
      BuildTree(out_stat, out_nodes, mid_idx, right_idx, depth + 1, p, pred);

  // Patch the branch node now that the child offsets are known.
  {
    (*out_nodes)[offset].data[0] = left_child_index;
    (*out_nodes)[offset].data[1] = right_child_index;

    (*out_nodes)[offset].bmin[0] = bmin[0];
    (*out_nodes)[offset].bmin[1] = bmin[1];
    (*out_nodes)[offset].bmin[2] = bmin[2];

    (*out_nodes)[offset].bmax[0] = bmax[0];
    (*out_nodes)[offset].bmax[1] = bmax[1];
    (*out_nodes)[offset].bmax[2] = bmax[2];
  }

  out_stat->num_branch_nodes++;

  return offset;
}
/// Builds the BVH over `num_primitives` primitives described by geometry
/// accessor `p` and SAH predicate `pred`. Returns false for an empty input,
/// true otherwise. Resets nodes_/bboxes_ and fills stats_.
template <typename T>
template <class P, class Pred>
bool BVHAccel<T>::Build(unsigned int num_primitives, const P &p,
                        const Pred &pred, const BVHBuildOptions<T> &options) {
  options_ = options;
  stats_ = BVHBuildStatistics();

  nodes_.clear();
  bboxes_.clear();

  assert(options_.bin_size > 1);

  if (num_primitives == 0) {
    return false;
  }

  unsigned int n = num_primitives;

  //
  // 1. Create triangle indices(this will be permutated in BuildTree)
  //
  indices_.resize(n);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < static_cast<int>(n); i++) {
    indices_[static_cast<size_t>(i)] = static_cast<unsigned int>(i);
  }

  //
  // 2. Compute bounding box(optional).
  //
  real3<T> bmin, bmax;

  if (options.cache_bbox) {
    // Cache per-primitive boxes so BuildTree() can use GetBoundingBox()
    // instead of recomputing boxes from geometry.
    bmin[0] = bmin[1] = bmin[2] = std::numeric_limits<T>::max();
    bmax[0] = bmax[1] = bmax[2] = -std::numeric_limits<T>::max();

    bboxes_.resize(n);
    for (size_t i = 0; i < n; i++) {  // for each primitive
      unsigned int idx = indices_[i];

      BBox<T> bbox;
      // NOTE(review): the box is computed for primitive `i` but stored at
      // `bboxes_[idx]`. At this point indices_[i] == i, so the two agree;
      // passing `idx` to BoundingBox() would make the pairing explicit.
      p.BoundingBox(&(bbox.bmin), &(bbox.bmax), static_cast<unsigned int>(i));
      bboxes_[idx] = bbox;

      for (int k = 0; k < 3; k++) {  // xyz
        if (bmin[k] > bbox.bmin[k]) {
          bmin[k] = bbox.bmin[k];
        }
        if (bmax[k] < bbox.bmax[k]) {
          bmax[k] = bbox.bmax[k];
        }
      }
    }
  } else {
#ifdef _OPENMP
    ComputeBoundingBoxOMP(&bmin, &bmax, &indices_.at(0), 0, n, p);
#else
    ComputeBoundingBox(&bmin, &bmax, &indices_.at(0), 0, n, p);
#endif
  }

  //
  // 3. Build tree
  //
#ifdef _OPENMP
#if NANORT_ENABLE_PARALLEL_BUILD
  // Do parallel build for sufficiently large datasets.
  if (n > options.min_primitives_for_parallel_build) {
    // Build the top of the tree serially; subtrees below shallow_depth are
    // recorded as jobs in shallow_node_infos_.
    BuildShallowTree(&nodes_, 0, n, /* root depth */ 0, options.shallow_depth,
                     p, pred);  // [0, n)

    assert(shallow_node_infos_.size() > 0);

    // Build deeper tree in parallel
    std::vector<std::vector<BVHNode<T> > > local_nodes(
        shallow_node_infos_.size());
    std::vector<BVHBuildStatistics> local_stats(shallow_node_infos_.size());

#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(shallow_node_infos_.size()); i++) {
      unsigned int left_idx = shallow_node_infos_[i].left_idx;
      unsigned int right_idx = shallow_node_infos_[i].right_idx;
      BuildTree(&(local_stats[i]), &(local_nodes[i]), left_idx, right_idx,
                options.shallow_depth, p, pred);
    }

    // Join local nodes
    for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) {
      assert(!local_nodes[i].empty());
      size_t offset = nodes_.size();

      // Add offset to child index(for branch node).
      // NOTE(review): `offset - 1` accounts for the local subtree root being
      // spliced into the pre-existing dummy slot rather than appended.
      for (size_t j = 0; j < local_nodes[i].size(); j++) {
        if (local_nodes[i][j].flag == 0) {  // branch
          local_nodes[i][j].data[0] += offset - 1;
          local_nodes[i][j].data[1] += offset - 1;
        }
      }

      // replace
      nodes_[shallow_node_infos_[i].offset] = local_nodes[i][0];

      // Skip root element of the local node.
      nodes_.insert(nodes_.end(), local_nodes[i].begin() + 1,
                    local_nodes[i].end());
    }

    // Join statistics
    for (int i = 0; i < static_cast<int>(local_nodes.size()); i++) {
      stats_.max_tree_depth =
          std::max(stats_.max_tree_depth, local_stats[i].max_tree_depth);
      stats_.num_leaf_nodes += local_stats[i].num_leaf_nodes;
      stats_.num_branch_nodes += local_stats[i].num_branch_nodes;
    }

  } else {
    // Small input: plain serial build.
    BuildTree(&stats_, &nodes_, 0, n,
              /* root depth */ 0, p, pred);  // [0, n)
  }
#else  // !NANORT_ENABLE_PARALLEL_BUILD
  {
    BuildTree(&stats_, &nodes_, 0, n,
              /* root depth */ 0, p, pred);  // [0, n)
  }
#endif
#else  // !_OPENMP
  {
    BuildTree(&stats_, &nodes_, 0, n,
              /* root depth */ 0, p, pred);  // [0, n)
  }
#endif

  return true;
}
/// Prints the index permutation and every node's bounds to stdout.
template <typename T>
void BVHAccel<T>::Debug() {
  for (size_t i = 0; i < indices_.size(); i++) {
    printf("index[%d] = %d\n", int(i), int(indices_[i]));
  }

  for (size_t i = 0; i < nodes_.size(); i++) {
    // BUGFIX: the z components were never printed -- bmin[1]/bmax[1]
    // appeared twice instead of bmin[2]/bmax[2].
    printf("node[%d] : bmin %f, %f, %f, bmax %f, %f, %f\n", int(i),
           nodes_[i].bmin[0], nodes_[i].bmin[1], nodes_[i].bmin[2],
           nodes_[i].bmax[0], nodes_[i].bmax[1], nodes_[i].bmax[2]);
  }
}
/// Serializes the built BVH (node count, nodes, index count, indices) to
/// `filename`. Returns false when the file cannot be opened, the BVH is
/// empty, or any write fails. Previously the write results were only
/// checked with assert(), which vanishes under NDEBUG.
template <typename T>
bool BVHAccel<T>::Dump(const char *filename) {
  FILE *fp = fopen(filename, "wb");
  if (!fp) {
    // fprintf(stderr, "[BVHAccel] Cannot write a file: %s\n", filename);
    return false;
  }

  size_t numNodes = nodes_.size();
  size_t numIndices = indices_.size();

  bool ok = (numNodes > 0);
  ok = ok && (fwrite(&numNodes, sizeof(size_t), 1, fp) == 1);
  ok = ok && (fwrite(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp) ==
              numNodes);
  ok = ok && (fwrite(&numIndices, sizeof(size_t), 1, fp) == 1);
  ok = ok && (fwrite(&indices_.at(0), sizeof(unsigned int), numIndices, fp) ==
              numIndices);

  // fclose flushes buffered data; a failure here means the file is bad.
  if (fclose(fp) != 0) {
    ok = false;
  }

  return ok;
}
/// Deserializes a BVH previously written by Dump(). Returns false when the
/// file cannot be opened or any read fails; previously short reads were
/// only caught by assert() (a no-op under NDEBUG), which could leave the
/// vectors holding uninitialized garbage.
template <typename T>
bool BVHAccel<T>::Load(const char *filename) {
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    // fprintf(stderr, "Cannot open file: %s\n", filename);
    return false;
  }

  size_t numNodes = 0;
  size_t numIndices = 0;

  // Read the node count before resizing, so a truncated header does not
  // trigger a huge allocation from an uninitialized value.
  bool ok = (fread(&numNodes, sizeof(size_t), 1, fp) == 1) && (numNodes > 0);
  if (ok) {
    nodes_.resize(numNodes);
    ok = (fread(&nodes_.at(0), sizeof(BVHNode<T>), numNodes, fp) == numNodes);
  }

  ok = ok && (fread(&numIndices, sizeof(size_t), 1, fp) == 1);
  if (ok && (numIndices > 0)) {
    indices_.resize(numIndices);
    ok = (fread(&indices_.at(0), sizeof(unsigned int), numIndices, fp) ==
          numIndices);
  }

  fclose(fp);

  return ok;
}
// Robust ray/AABB slab test. Returns true and the entry/exit distances
// [tminOut, tmaxOut] when the ray hits the box within [min_t, max_t].
template <typename T>
inline bool IntersectRayAABB(T *tminOut,  // [out]
                             T *tmaxOut,  // [out]
                             T min_t, T max_t, const T bmin[3], const T bmax[3],
                             real3<T> ray_org, real3<T> ray_inv_dir,
                             int ray_dir_sign[3]) {
  T tmin, tmax;

  // Select near/far slab planes per axis from the direction sign.
  const T min_x = ray_dir_sign[0] ? bmax[0] : bmin[0];
  const T min_y = ray_dir_sign[1] ? bmax[1] : bmin[1];
  const T min_z = ray_dir_sign[2] ? bmax[2] : bmin[2];
  const T max_x = ray_dir_sign[0] ? bmin[0] : bmax[0];
  const T max_y = ray_dir_sign[1] ? bmin[1] : bmax[1];
  const T max_z = ray_dir_sign[2] ? bmin[2] : bmax[2];

  // X
  const T tmin_x = (min_x - ray_org[0]) * ray_inv_dir[0];
  // MaxMult robust BVH traversal(up to 4 ulp).
  // 1.0000000000000004 for double precision.
  // NOTE(review): the float constant 1.00000024f is used even when
  // T == double -- confirm whether a double-precision constant is intended.
  const T tmax_x = (max_x - ray_org[0]) * ray_inv_dir[0] * 1.00000024f;

  // Y
  const T tmin_y = (min_y - ray_org[1]) * ray_inv_dir[1];
  const T tmax_y = (max_y - ray_org[1]) * ray_inv_dir[1] * 1.00000024f;

  // Z
  const T tmin_z = (min_z - ray_org[2]) * ray_inv_dir[2];
  const T tmax_z = (max_z - ray_org[2]) * ray_inv_dir[2] * 1.00000024f;

  // safemin/safemax fall back to their second argument on NaN, keeping the
  // interval valid when a slab computation produces NaN.
  tmin = safemax(tmin_z, safemax(tmin_y, safemax(tmin_x, min_t)));
  tmax = safemin(tmax_z, safemin(tmax_y, safemin(tmax_x, max_t)));

  if (tmin <= tmax) {
    (*tminOut) = tmin;
    (*tmaxOut) = tmax;

    return true;
  }

  return false;  // no hit
}
/// Intersects the ray against every primitive referenced by leaf `node`,
/// keeping the nearest hit inside `intersector` (via Update()).
/// Returns true when at least one primitive was hit.
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNode(const BVHNode<T> &node, const Ray<T> &ray,
                                      const I &intersector) const {
  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = intersector.GetT();  // current nearest hit distance

  // Dead code removed: local copies of ray.org/ray.dir were built here but
  // never used -- the intersector already captured the ray in
  // PrepareTraversal().
  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T local_t = t;
    if (intersector.Intersect(&local_t, prim_idx)) {
      // Update isect state
      t = local_t;
      intersector.Update(t, prim_idx);
      hit = true;
    }
  }

  (void)ray;

  return hit;
}
#if 0 // TODO(LTE): Implement
// Disabled draft of a multi-hit leaf test: collects up to
// `max_intersections` hits in a max-priority queue keyed on distance,
// evicting the furthest hit when a closer one is found.
// Note: this compiles only once the Intersect(t, u, v, prim) overload
// exists, and the inner `H hit;` shadows the outer `bool hit` flag.
template <typename T> template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTestLeafNode(
    std::priority_queue<H, std::vector<H>, Comp> *isect_pq,
    int max_intersections,
    const BVHNode<T> &node,
    const Ray<T> &ray,
    const I &intersector) const {

  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  T t = std::numeric_limits<T>::max();
  if (isect_pq->size() >= static_cast<size_t>(max_intersections)) {
    t = isect_pq->top().t; // current furthest hit distance
  }

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  real3<T> ray_dir;
  ray_dir[0] = ray.dir[0];
  ray_dir[1] = ray.dir[1];
  ray_dir[2] = ray.dir[2];

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];

    T local_t = t, u = 0.0f, v = 0.0f;
    if (intersector.Intersect(&local_t, &u, &v, prim_idx)) {
      // Update isect state
      if ((local_t > ray.min_t)) {
        if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
          // Queue not yet full: accept the hit unconditionally.
          H isect;
          t = local_t;
          isect.t = t;
          isect.u = u;
          isect.v = v;
          isect.prim_id = prim_idx;
          isect_pq->push(isect);

          // Update t to furthest distance.
          t = ray.max_t;

          hit = true;
        } else {
          if (local_t < isect_pq->top().t) {
            // delete furthest intersection and add new intersection.
            isect_pq->pop();

            H hit;
            hit.t = local_t;
            hit.u = u;
            hit.v = v;
            hit.prim_id = prim_idx;
            isect_pq->push(hit);

            // Update furthest hit distance
            t = isect_pq->top().t;

            hit = true;
          }
        }
      }
    }
  }

  return hit;
}
#endif
template <typename T>
template <class I, class H>
bool BVHAccel<T>::Traverse(const Ray<T> &ray, const I &intersector, H *isect,
                           const BVHTraceOptions &options) const {
  // Iterative (explicit-stack) nearest-hit BVH traversal. Visits the near
  // child of each branch first so `hit_t` shrinks quickly and far subtrees
  // are culled by the AABB test.
  const int kMaxStackDepth = 512;

  T hit_t = ray.max_t;

  int node_stack_index = 0;
  // CONSISTENCY: size the stack with kMaxStackDepth instead of repeating
  // the magic number 512.
  unsigned int node_stack[kMaxStackDepth];
  node_stack[0] = 0;

  // Init isect info as no hit.
  intersector.Update(hit_t, static_cast<unsigned int>(-1));
  intersector.PrepareTraversal(ray, options);

  int dir_sign[3];
  dir_sign[0] = ray.dir[0] < 0.0f ? 1 : 0;
  dir_sign[1] = ray.dir[1] < 0.0f ? 1 : 0;
  dir_sign[2] = ray.dir[2] < 0.0f ? 1 : 0;

  // @fixme { Check edge case; i.e., 1/0 }
  // The small bias keeps the reciprocal finite for axis-aligned rays at the
  // cost of a tiny direction error.
  real3<T> ray_inv_dir;
  ray_inv_dir[0] = 1.0f / (ray.dir[0] + 1.0e-12f);
  ray_inv_dir[1] = 1.0f / (ray.dir[1] + 1.0e-12f);
  ray_inv_dir[2] = 1.0f / (ray.dir[2] + 1.0e-12f);

  real3<T> ray_org;
  ray_org[0] = ray.org[0];
  ray_org[1] = ray.org[1];
  ray_org[2] = ray.org[2];

  T min_t = std::numeric_limits<T>::max();
  T max_t = -std::numeric_limits<T>::max();

  while (node_stack_index >= 0) {
    unsigned int index = node_stack[node_stack_index];
    const BVHNode<T> &node = nodes_[index];
    node_stack_index--;

    bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
                                node.bmax, ray_org, ray_inv_dir, dir_sign);

    if (node.flag == 0) { // branch node
      if (hit) {
        int order_near = dir_sign[node.axis];
        int order_far = 1 - order_near;
        // Traverse near first.
        node_stack[++node_stack_index] = node.data[order_far];
        node_stack[++node_stack_index] = node.data[order_near];
      }
    } else { // leaf node
      if (hit) {
        if (TestLeafNode(node, ray, intersector)) {
          hit_t = intersector.GetT();  // tighten the ray interval
        }
      }
    }
  }

  assert(node_stack_index < kMaxStackDepth);

  bool hit = (intersector.GetT() < ray.max_t);
  intersector.PostTraversal(ray, hit, isect);

  return hit;
}
template <typename T>
template <class I>
inline bool BVHAccel<T>::TestLeafNodeIntersections(
    const BVHNode<T> &node, const Ray<T> &ray, const int max_intersections,
    const I &intersector,
    std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
                        NodeHitComparator<T> > *isect_pq) const {
  // Collect up to `max_intersections` primitive overlap intervals for this
  // leaf. The queue keeps the furthest interval on top so it can be evicted
  // when a closer one is found.
  bool hit = false;

  unsigned int num_primitives = node.data[0];
  unsigned int offset = node.data[1];

  // NOTE(review): the original also copied ray.org/ray.dir into unused
  // local temporaries; removed as dead code.
  intersector.PrepareTraversal(ray);

  for (unsigned int i = 0; i < num_primitives; i++) {
    unsigned int prim_idx = indices_[i + offset];
    T min_t, max_t;
    if (intersector.Intersect(&min_t, &max_t, prim_idx)) {
      // BUGFIX: `hit` was initialized to false and never set, so this
      // function always returned false even when intersections were added.
      hit = true;

      // Always add to isect lists.
      NodeHit<T> isect;
      isect.t_min = min_t;
      isect.t_max = max_t;
      isect.node_id = prim_idx;

      if (isect_pq->size() < static_cast<size_t>(max_intersections)) {
        isect_pq->push(isect);
      } else if (min_t < isect_pq->top().t_min) {
        // Delete the furthest intersection and add the new, closer one.
        isect_pq->pop();
        isect_pq->push(isect);
      }
    }
  }

  return hit;
}
template <typename T>
template <class I>
// Collect up to `max_intersections` nearest node overlap intervals along
// `ray` and store them frontmost-first into `hits`.
// Returns true when at least one intersection was recorded.
bool BVHAccel<T>::ListNodeIntersections(
const Ray<T> &ray, int max_intersections, const I &intersector,
StackVector<NodeHit<T>, 128> *hits) const {
const int kMaxStackDepth = 512;
T hit_t = ray.max_t;
int node_stack_index = 0;
unsigned int node_stack[512];
node_stack[0] = 0;
// Stores furthest intersection at top
std::priority_queue<NodeHit<T>, std::vector<NodeHit<T> >,
NodeHitComparator<T> >
isect_pq;
(*hits)->clear();
int dir_sign[3];
dir_sign[0] =
ray.dir[0] < static_cast<T>(0.0) ? 1 : 0;
dir_sign[1] =
ray.dir[1] < static_cast<T>(0.0) ? 1 : 0;
dir_sign[2] =
ray.dir[2] < static_cast<T>(0.0) ? 1 : 0;
// @fixme { Check edge case; i.e., 1/0 }
// NOTE(review): unlike Traverse(), no epsilon bias is added here, so an
// exactly-zero direction component produces an infinite reciprocal --
// confirm IntersectRayAABB tolerates that.
real3<T> ray_inv_dir;
ray_inv_dir[0] = static_cast<T>(1.0) / ray.dir[0];
ray_inv_dir[1] = static_cast<T>(1.0) / ray.dir[1];
ray_inv_dir[2] = static_cast<T>(1.0) / ray.dir[2];
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
T min_t, max_t;
while (node_stack_index >= 0) {
unsigned int index = node_stack[node_stack_index];
const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];
node_stack_index--;
bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
node.bmax, ray_org, ray_inv_dir, dir_sign);
if (node.flag == 0) { // branch node
if (hit) {
int order_near = dir_sign[node.axis];
int order_far = 1 - order_near;
// Traverse near first.
node_stack[++node_stack_index] = node.data[order_far];
node_stack[++node_stack_index] = node.data[order_near];
}
} else { // leaf node
if (hit) {
// Return value intentionally ignored: results accumulate in isect_pq.
TestLeafNodeIntersections(node, ray, max_intersections, intersector,
&isect_pq);
}
}
}
assert(node_stack_index < kMaxStackDepth);
(void)kMaxStackDepth;
if (!isect_pq.empty()) {
// Store intersections in reverse order (make it frontmost order)
size_t n = isect_pq.size();
(*hits)->resize(n);
for (size_t i = 0; i < n; i++) {
const NodeHit<T> &isect = isect_pq.top();
(*hits)[n - i - 1] = isect;
isect_pq.pop();
}
return true;
}
return false;
}
#if 0 // TODO(LTE): Implement
// Disabled, incomplete multi-hit traversal: collects up to
// `max_intersections` hits, stored frontmost-first into `hits`, using a
// furthest-on-top priority queue.
template <typename T> template<class I, class H, class Comp>
bool BVHAccel<T>::MultiHitTraverse(const Ray<T> &ray,
int max_intersections,
const I &intersector,
StackVector<H, 128> *hits,
const BVHTraceOptions& options) const {
const int kMaxStackDepth = 512;
T hit_t = ray.max_t;
int node_stack_index = 0;
unsigned int node_stack[512];
node_stack[0] = 0;
// Stores furthest intersection at top
std::priority_queue<H, std::vector<H>, Comp> isect_pq;
(*hits)->clear();
// Init isect info as no hit
intersector.Update(hit_t, static_cast<unsigned int>(-1));
intersector.PrepareTraversal(ray, options);
int dir_sign[3];
// NOTE(review): the ternary results are cast to T yet stored into int --
// harmless for 0/1 but the casts look unintended.
dir_sign[0] = ray.dir[0] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);
dir_sign[1] = ray.dir[1] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);
dir_sign[2] = ray.dir[2] < static_cast<T>(0.0) ? static_cast<T>(1) : static_cast<T>(0);
// @fixme { Check edge case; i.e., 1/0 }
real3<T> ray_inv_dir;
ray_inv_dir[0] = static_cast<T>(1.0) / ray.dir[0];
ray_inv_dir[1] = static_cast<T>(1.0) / ray.dir[1];
ray_inv_dir[2] = static_cast<T>(1.0) / ray.dir[2];
real3<T> ray_org;
ray_org[0] = ray.org[0];
ray_org[1] = ray.org[1];
ray_org[2] = ray.org[2];
T min_t, max_t;
while (node_stack_index >= 0) {
unsigned int index = node_stack[node_stack_index];
const BVHNode<T> &node = nodes_[static_cast<size_t>(index)];
node_stack_index--;
bool hit = IntersectRayAABB(&min_t, &max_t, ray.min_t, hit_t, node.bmin,
node.bmax, ray_org, ray_inv_dir, dir_sign);
if (node.flag == 0) { // branch node
if (hit) {
int order_near = dir_sign[node.axis];
int order_far = 1 - order_near;
// Traverse near first.
node_stack[++node_stack_index] = node.data[order_far];
node_stack[++node_stack_index] = node.data[order_near];
}
} else { // leaf node
if (hit) {
if (MultiHitTestLeafNode(&isect_pq, max_intersections, node, ray, intersector)) {
// Only update `hit_t` when queue is full.
if (isect_pq.size() >= static_cast<size_t>(max_intersections)) {
hit_t = isect_pq.top().t;
}
}
}
}
}
assert(node_stack_index < kMaxStackDepth);
(void)kMaxStackDepth;
if (!isect_pq.empty()) {
// Store intersections in reverse order (make it frontmost order)
size_t n = isect_pq.size();
(*hits)->resize(n);
for (size_t i = 0; i < n; i++) {
const H &isect = isect_pq.top();
(*hits)[n - i - 1] = isect;
isect_pq.pop();
}
return true;
}
return false;
}
#endif
#ifdef __clang__
#pragma clang diagnostic pop
#endif
} // namespace nanort
#endif // NANORT_H_
|
parallel_dsu.h | #ifndef __DSU_H
#define __DSU_H
#include <atomic>
#include <new>
#include <omp.h>
#include <stdexcept>
#include "defs.h"
#include "parallel_array.h"
/**
* INTERFACE:
*
* ParallelDSU(uint32_t N, uint32_t NUM_THREADS) - constructs a DSU of size N using NUM_THREADS
* uint32_t find_root(uint32_t id) - finds root node of id
* bool same_set(uint32_t id1, uint32_t id2) - checks if id1 and id2 are in the same set
* void unite(uint32_t id1, uint32_t id2) - unites sets of id1 and id2
*
* DETAILS:
*
* Implementation was inspired by this repo
* https://github.com/wjakob/dset/blob/master/dset.h
* and this paper
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.56.8354&rep=rep1&type=pdf
* It uses both rank and path heuristics in parallel
* which should allow for O(\alpha S) time on both find_root and unite operations
*
* Data is stored in unsigned 64 bit integers
* The first 32 bits encode node parent
* The last 32 bits encode node rank
* This allows for easier compare and swap and should work slightly faster
*
* E.g. if X is the stored value:
* X & 0x00000000FFFFFFFF <- parent
* X & 0xFFFFFFFF00000000 <- rank
*
* To decode these values from u64 one should use get_parent() and get_rank()
*
* I also check if id is within range and throw an exception otherwise,
* this slows the code down a little bit but should save you some time debugging
*/
struct ParallelDSU {
  const u32 NUM_THREADS;
  u32 dsu_size;
  atomic_u64* data;  // one packed (rank << 32 | parent) word per node

  const u32 BINARY_BUCKET_SIZE = 32;
  const u64 RANK_MASK = 0xFFFFFFFF00000000ULL;

  ParallelDSU(u32 size, u32 NUM_THREADS = omp_get_max_threads()) : NUM_THREADS(NUM_THREADS), dsu_size(size) {
    if (size == 0) {
      throw std::invalid_argument("DSU size cannot be zero");
    }
    data = static_cast<atomic_u64*>(operator new[] (size * sizeof(atomic_u64)));
    /* BUGFIX: construct each atomic in place (assigning through raw,
     * unconstructed storage is formally undefined behavior).
     * Every node starts as its own root with rank 0. */
#pragma omp parallel for shared(data) num_threads(NUM_THREADS)
    for (u32 i = 0; i < size; ++i) new (&data[i]) atomic_u64(i);
  }

  /* BUGFIX: the raw operator new[] allocation was never released
   * (atomic_u64 is trivially destructible, so no per-element destruction
   * is needed). */
  ~ParallelDSU() {
    operator delete[](static_cast<void*>(data));
  }

  /* Copying would double-free `data`. */
  ParallelDSU(const ParallelDSU&) = delete;
  ParallelDSU& operator=(const ParallelDSU&) = delete;

  /* Number of nodes in the DSU. */
  u32 size() const {
    return dsu_size;
  }

  void check_out_of_range(u32 id) const {
    if (id >= size()) {
      throw std::out_of_range("Node id out of range");
    }
  }

  /* Pack (parent, rank) into one 64-bit word: rank in the high 32 bits,
   * parent in the low 32 bits. */
  u64 encode_node(u32 parent, u32 rank) const {
    return (static_cast<u64>(rank) << BINARY_BUCKET_SIZE) | parent;
  }

  u32 get_parent(u32 id) const {
    return static_cast<u32>(data[id]);
  }

  u32 get_rank(u32 id) const {
    return static_cast<u32>(data[id] >> BINARY_BUCKET_SIZE);
  }

  /**
   * On each step we try to apply the path-halving heuristic using CAS
   * (replace our parent with our grandparent) and then move up one level.
   *
   * The loop breaks when a node's parent equals itself, i.e. at the root.
   */
  u32 find_root(u32 id) {
    check_out_of_range(id);
    while (id != get_parent(id)) {
      u64 value = data[id];
      u32 grandparent = get_parent(static_cast<u32>(value));
      u64 new_value = (value & RANK_MASK) | grandparent;
      /* Path heuristic; a failed CAS just means someone else compressed
       * this node first, which is fine. */
      if (value != new_value) {
        data[id].compare_exchange_strong(value, new_value);
      }
      id = grandparent;
    }
    return id;
  }

  /**
   * Check whether two nodes belong to the same set by comparing roots.
   *
   * Since this is a concurrent structure, roots may change during the
   * check; we loop and retry if id1 is no longer a root afterwards.
   *
   * In general, call this after synchronization: it still works during
   * parallel segments, but the answer may be stale by the time it returns.
   */
  bool same_set(u32 id1, u32 id2) {
    check_out_of_range(id1);
    check_out_of_range(id2);
    while (true) {
      id1 = find_root(id1);
      id2 = find_root(id2);
      if (id1 == id2) {
        return true;
      } else if (get_parent(id1) == id1) {
        return false;
      }
    }
  }

  /**
   * Union by rank: hang the smaller-rank root under the bigger-rank root
   * (ties broken by node id so the choice is deterministic).
   *
   * Since this is a concurrent structure, the CAS that re-parents the
   * smaller root may fail when another thread got there first; in that
   * case we re-find the roots and retry.
   */
  void unite(u32 id1, u32 id2) {
    check_out_of_range(id1);
    check_out_of_range(id2);
    while (true) {
      id1 = find_root(id1);
      id2 = find_root(id2);
      /* Nodes are already in the same set */
      if (id1 == id2) return;
      u32 rank1 = get_rank(id1);
      u32 rank2 = get_rank(id2);
      /* Hanging the smaller set to the bigger one, rank heuristic */
      if (rank1 < rank2 || (rank1 == rank2 && id1 > id2)) {
        std::swap(rank1, rank2);
        std::swap(id1, id2);
      }
      u64 old_value = encode_node(id2, rank2);
      u64 new_value = encode_node(id1, rank2);
      /* If CAS fails we need to repeat the same step once again */
      if (!data[id2].compare_exchange_strong(old_value, new_value)) {
        continue;
      }
      /* Updating rank: only needed on a tie; a failed CAS here is benign
       * (someone already raised or re-parented id1). */
      if (rank1 == rank2) {
        old_value = encode_node(id1, rank1);
        new_value = encode_node(id1, rank1 + 1);
        data[id1].compare_exchange_strong(old_value, new_value);
      }
      break;
    }
  }
};
#endif
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program CG
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
/* Complex number type emitted by the source-to-source translator
 * (not used by the CG kernel itself). */
struct anon_NAS_CG_c_75 {
double real;
double imag;
};
typedef struct anon_NAS_CG_c_75 dcomplex;
//---------------------------------------------------------------------
/*common / main_int_mem /*/
/* Sparse matrix in CSR form: colidx = column indices, rowstr = row
 * pointers (rowstr[j]..rowstr[j+1]-1 index row j's entries in a/colidx). */
int colidx[567000];
int rowstr[7001];
/* Workspace for matrix generation (see makea/sparse). */
int iv[7000];
int arow[7000];
int acol[63000];
/*common / main_flt_mem /*/
/* aelt: raw generated elements; a: CSR nonzero values. */
double aelt[63000];
double a[567000];
/* CG vectors: x = eigenvector estimate, z = solution of A.z = x,
 * p/q/r = search direction, A.p, and residual respectively. */
double x[7002];
double z[7002];
double p[7002];
double q[7002];
double r[7002];
/*common / partit_size /*/
int naa;
int nzz;
int firstrow;
int lastrow;
int firstcol;
int lastcol;
/*common /urando/*/
/* Linear congruential random number generator state (see randlc). */
double amult;
double tran;
//---------------------------------------------------------------------
void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm);
void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][9], double aelt[][9], int iv[]);
void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][9], double aelt[][9], int firstrow, int lastrow, int nzloc[], double rcond, double shift);
void sprnvc(int n, int nz, int nn1, double v[], int iv[]);
int icnvrt(double x, int ipwr2);
void vecset(int n, double v[], int iv[], int *nzv, int i, double val);
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified);
double randlc(double *x, double a);
void vranlc(int n, double *x, double a, double y[]);
/* Simple wall-clock timers, one slot per measured phase. */
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
//---------------------------------------------------------------------
/* NAS CG benchmark driver (class W constants inlined by the translator):
 * builds the sparse matrix, runs one untimed warm-up CG solve, then 15
 * timed inverse-power-method iterations, and verifies zeta. */
int main(int argc, char *argv[]) {
int i, j, k, it;
double zeta;
double rnorm;
double norm_temp1, norm_temp2;
double t, mflops, tmax;
char Class;
int verified;
double zeta_verify_value, epsilon, err;
char *t_names[3];
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 0; i < 3; i++) {
timer_clear(i);
}
timer_start(0);
firstrow = 0;
lastrow = 7000 - 1;
firstcol = 0;
lastcol = 7000 - 1;
/* Problem-class detection: the translator replaced NA/NONZER/NITER/SHIFT
 * with literal values (7000, 8, 15, 12.0), so only the 'W' branch can
 * match here. */
if(7000 == 1400 && 8 == 7 && 15 == 15 && 12.0 == 10) {
Class = 'S';
zeta_verify_value = 8.5971775078648;
}
else if(7000 == 7000 && 8 == 8 && 15 == 15 && 12.0 == 12) {
Class = 'W';
zeta_verify_value = 10.362595087124;
}
else if(7000 == 14000 && 8 == 11 && 15 == 15 && 12.0 == 20) {
Class = 'A';
zeta_verify_value = 17.130235054029;
}
else if(7000 == 75000 && 8 == 13 && 15 == 75 && 12.0 == 60) {
Class = 'B';
zeta_verify_value = 22.712745482631;
}
else if(7000 == 150000 && 8 == 15 && 15 == 75 && 12.0 == 110) {
Class = 'C';
zeta_verify_value = 28.973605592845;
}
else if(7000 == 1500000 && 8 == 21 && 15 == 100 && 12.0 == 500) {
Class = 'D';
zeta_verify_value = 52.514532105794;
}
else if(7000 == 9000000 && 8 == 26 && 15 == 100 && 12.0 == 1500) {
Class = 'E';
zeta_verify_value = 77.522164599383;
}
else {
Class = 'U';
}
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - CG Benchmark\n\n");
printf(" Size: %11d\n", 7000);
printf(" Iterations: %5d\n", 15);
printf("\n");
naa = 7000;
nzz = (7000 * (8 + 1) * (8 + 1));
//---------------------------------------------------------------------
// Initialize random number generator
//---------------------------------------------------------------------
tran = 314159265.0;
amult = 1220703125.0;
zeta = randlc(&tran, amult);
//---------------------------------------------------------------------
//
//---------------------------------------------------------------------
makea(naa, nzz, a, colidx, rowstr, firstrow, lastrow, firstcol, lastcol, arow, (int (*)[9]) (void *) acol, (double (*)[9]) (void *) aelt, iv);
//---------------------------------------------------------------------
// Note: as a result of the above call to makea:
// values of j used in indexing rowstr go from 0 --> lastrow-firstrow
// values of colidx which are col indexes go from firstcol --> lastcol
// So:
// Shift the col index vals from actual (firstcol --> lastcol )
// to local, i.e., (0 --> lastcol-firstcol)
//---------------------------------------------------------------------
/* NOTE(review): tool-generated pragmas below nest a parallel for inside
 * another, and use an unusual "reduction(- : colidx[:...])" clause --
 * confirm this is what the generating tool (Clava) intended before
 * hand-editing. */
#pragma omp parallel for default(shared) private(j, k) firstprivate(lastrow, firstrow, firstcol, rowstr) reduction(- : colidx[:567000])
for(j = 0; j < lastrow - firstrow + 1; j++) {
#pragma omp parallel for default(shared) private(k) firstprivate(j, firstcol, rowstr)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
colidx[k] = colidx[k] - firstcol;
}
}
//---------------------------------------------------------------------
// set starting vector to (1, 1, .... 1)
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i)
for(i = 0; i < 7000 + 1; i++) {
x[i] = 1.0;
}
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol)
for(j = 0; j < lastcol - firstcol + 1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = 0.0;
p[j] = 0.0;
}
zeta = 0.0;
//---------------------------------------------------------------------
//---->
// Do one iteration untimed to init all code and data page tables
//----> (then reinit, start timing, to niter its)
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(it = 1; it <= 1; it++) { // end of do one iteration untimed
//---------------------------------------------------------------------
// The call to the conjugate gradient routine:
//---------------------------------------------------------------------
conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
//---------------------------------------------------------------------
// zeta = shift + 1/(x.z)
// So, first: (x.z)
// Also, find norm of z
// So, first: (z.z)
//---------------------------------------------------------------------
norm_temp1 = 0.0;
norm_temp2 = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2)
for(j = 0; j < lastcol - firstcol + 1; j++) {
norm_temp1 = norm_temp1 + x[j] * z[j];
norm_temp2 = norm_temp2 + z[j] * z[j];
}
norm_temp2 = 1.0 / sqrt(norm_temp2);
//---------------------------------------------------------------------
// Normalize z to obtain x
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z)
for(j = 0; j < lastcol - firstcol + 1; j++) {
x[j] = norm_temp2 * z[j];
}
}
//---------------------------------------------------------------------
// set starting vector to (1, 1, .... 1)
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(i)
for(i = 0; i < 7000 + 1; i++) {
x[i] = 1.0;
}
zeta = 0.0;
timer_stop(0);
printf(" Initialization time = %15.3f seconds\n", timer_read(0));
timer_start(1);
//---------------------------------------------------------------------
//---->
// Main Iteration for inverse power method
//---->
//---------------------------------------------------------------------
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(it = 1; it <= 15; it++) { // end of main iter inv pow meth
//---------------------------------------------------------------------
// The call to the conjugate gradient routine:
//---------------------------------------------------------------------
conj_grad(colidx, rowstr, x, z, a, p, q, r, &rnorm);
//---------------------------------------------------------------------
// zeta = shift + 1/(x.z)
// So, first: (x.z)
// Also, find norm of z
// So, first: (z.z)
//---------------------------------------------------------------------
norm_temp1 = 0.0;
norm_temp2 = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, x, z) reduction(+ : norm_temp1) reduction(+ : norm_temp2)
for(j = 0; j < lastcol - firstcol + 1; j++) {
norm_temp1 = norm_temp1 + x[j] * z[j];
norm_temp2 = norm_temp2 + z[j] * z[j];
}
norm_temp2 = 1.0 / sqrt(norm_temp2);
zeta = 12.0 + 1.0 / norm_temp1;
if(it == 1) printf("\n iteration ||r|| zeta\n");
printf(" %5d %20.14E%20.13f\n", it, rnorm, zeta);
//---------------------------------------------------------------------
// Normalize z to obtain x
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, norm_temp2, z)
for(j = 0; j < lastcol - firstcol + 1; j++) {
x[j] = norm_temp2 * z[j];
}
}
timer_stop(1);
//---------------------------------------------------------------------
// End of timed section
//---------------------------------------------------------------------
t = timer_read(1);
printf(" Benchmark completed\n");
epsilon = 1.0e-10;
if(Class != 'U') {
err = fabs(zeta - zeta_verify_value) / zeta_verify_value;
if(err <= epsilon) {
verified = 1;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.13E\n", zeta);
printf(" Error is %20.13E\n", err);
}
else {
verified = 0;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.13E\n", zeta);
printf(" The correct zeta is %20.13E\n", zeta_verify_value);
}
}
else {
verified = 0;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
if(t != 0.0) {
mflops = (double) (2 * 15 * 7000) * (3.0 + (double) (8 * (8 + 1)) + 25.0 * (5.0 + (double) (8 * (8 + 1))) + 3.0) / t / 1000000.0;
}
else {
mflops = 0.0;
}
print_results("CG", Class, 7000, 0, 0, 15, t, mflops, " floating point", verified);
int exitValue = verified ? 0 : 1;
return exitValue;
}
//---------------------------------------------------------------------
// Floating point arrays here are named as in NPB1 spec discussion of
// CG algorithm
//---------------------------------------------------------------------
/* Conjugate-gradient solve of A.z = x (cgitmax iterations), returning the
 * explicitly recomputed residual norm ||x - A.z|| in *rnorm.
 * NOTE(review): the pragmas below were emitted by an auto-parallelizer
 * (Clava) and include nested parallel-for regions -- confirm intent before
 * hand-editing. */
void conj_grad(int colidx[], int rowstr[], double x[], double z[], double a[], double p[], double q[], double r[], double *rnorm) {
int j, k;
int cgit, cgitmax = 25;
double d, sum, rho, rho0, alpha, beta;
rho = 0.0;
//---------------------------------------------------------------------
// Initialize the CG algorithm:
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(naa, x)
for(j = 0; j < naa + 1; j++) {
q[j] = 0.0;
z[j] = 0.0;
r[j] = x[j];
p[j] = r[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho)
for(j = 0; j < lastcol - firstcol + 1; j++) {
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
//---->
// The conj grad iteration loop
//---->
//---------------------------------------------------------------------
/*************** Clava msgError **************
Variable rho could not be categorized into any OpenMP Variable Scopeuse : RWR
****************************************/
for(cgit = 1; cgit <= cgitmax; cgit++) { // end of do cgit=1,cgitmax
//---------------------------------------------------------------------
// q = A.p
// The partition submatrix-vector multiply: use workspace w
//---------------------------------------------------------------------
//
// NOTE: this version of the multiply is actually (slightly: maybe %5)
// faster on the sp2 on 16 nodes than is the unrolled-by-2 version
// below. On the Cray t3d, the reverse is 1, i.e., the
// unrolled-by-two version is some 10% faster.
// The unrolled-by-8 version below is significantly faster
// on the Cray t3d - overall speed of code is 1.5 times faster.
#pragma omp parallel for default(shared) private(j, k, sum) firstprivate(lastrow, firstrow, rowstr, a, colidx, p)
for(j = 0; j < lastrow - firstrow + 1; j++) {
sum = 0.0;
#pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr, a, colidx, p) reduction(+ : sum)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
sum = sum + a[k] * p[colidx[k]];
}
q[j] = sum;
}
/*
for (j = 0; j < lastrow - firstrow + 1; j++) {
int i = rowstr[j];
int iresidue = (rowstr[j+1] - i) % 2;
double sum1 = 0.0;
double sum2 = 0.0;
if (iresidue == 1)
sum1 = sum1 + a[i]*p[colidx[i]];
for (k = i + iresidue; k <= rowstr[j+1] - 2; k += 2) {
sum1 = sum1 + a[k] *p[colidx[k]];
sum2 = sum2 + a[k+1]*p[colidx[k+1]];
}
q[j] = sum1 + sum2;
}
*/
/*
for (j = 0; j < lastrow - firstrow + 1; j++) {
int i = rowstr[j];
int iresidue = (rowstr[j+1] - i) % 8;
double sum = 0.0;
for (k = i; k <= i + iresidue - 1; k++) {
sum = sum + a[k]*p[colidx[k]];
}
for (k = i + iresidue; k <= rowstr[j+1] - 8; k += 8) {
sum = sum + a[k ]*p[colidx[k ]]
+ a[k+1]*p[colidx[k+1]]
+ a[k+2]*p[colidx[k+2]]
+ a[k+3]*p[colidx[k+3]]
+ a[k+4]*p[colidx[k+4]]
+ a[k+5]*p[colidx[k+5]]
+ a[k+6]*p[colidx[k+6]]
+ a[k+7]*p[colidx[k+7]];
}
q[j] = sum;
}
*/
//---------------------------------------------------------------------
// Obtain p.q
//---------------------------------------------------------------------
d = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, p, q) reduction(+ : d)
for(j = 0; j < lastcol - firstcol + 1; j++) {
d = d + p[j] * q[j];
}
//---------------------------------------------------------------------
// Obtain alpha = rho / (p.q)
//---------------------------------------------------------------------
alpha = rho / d;
//---------------------------------------------------------------------
// Save a temporary of rho
//---------------------------------------------------------------------
rho0 = rho;
//---------------------------------------------------------------------
// Obtain z = z + alpha*p
// and r = r - alpha*q
//---------------------------------------------------------------------
rho = 0.0;
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, alpha, p, q)
for(j = 0; j < lastcol - firstcol + 1; j++) {
z[j] = z[j] + alpha * p[j];
r[j] = r[j] - alpha * q[j];
}
//---------------------------------------------------------------------
// rho = r.r
// Now, obtain the norm of r: First, sum squares of r elements locally...
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, r) reduction(+ : rho)
for(j = 0; j < lastcol - firstcol + 1; j++) {
rho = rho + r[j] * r[j];
}
//---------------------------------------------------------------------
// Obtain beta:
//---------------------------------------------------------------------
beta = rho / rho0;
//---------------------------------------------------------------------
// p = r + beta*p
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(lastcol, firstcol, beta, r)
for(j = 0; j < lastcol - firstcol + 1; j++) {
p[j] = r[j] + beta * p[j];
}
}
//---------------------------------------------------------------------
// Compute residual norm explicitly: ||r|| = ||x - A.z||
// First, form A.z
// The partition submatrix-vector multiply
//---------------------------------------------------------------------
sum = 0.0;
#pragma omp parallel for default(shared) private(j, k, d) firstprivate(lastrow, firstrow, rowstr, a, colidx, z)
for(j = 0; j < lastrow - firstrow + 1; j++) {
d = 0.0;
#pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr, a, colidx, z) reduction(+ : d)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
d = d + a[k] * z[colidx[k]];
}
r[j] = d;
}
//---------------------------------------------------------------------
// At this point, r contains A.z
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, d) firstprivate(lastcol, firstcol, x, r) reduction(+ : sum)
for(j = 0; j < lastcol - firstcol + 1; j++) {
d = x[j] - r[j];
sum = sum + d * d;
}
*rnorm = sqrt(sum);
}
//---------------------------------------------------------------------
// generate the test problem for benchmark 6
// makea generates a sparse matrix with a
// prescribed sparsity distribution
//
// parameter type usage
//
// input
//
// n i number of cols/rows of matrix
// nz i nonzeros as declared array size
// rcond r*8 condition number
// shift r*8 main diagonal shift
//
// output
//
// a r*8 array for nonzeros
// colidx i col indices
// rowstr i row pointers
//
// workspace
//
// iv, arow, acol i
// aelt r*8
//---------------------------------------------------------------------
/* Generate the CG test matrix: for each of n rows, build a sparse random
 * vector (sprnvc), force a 0.5 entry on the diagonal position (vecset),
 * and record the triples in arow/acol/aelt; sparse() then assembles them
 * into CSR form in a/colidx/rowstr. */
void makea(int n, int nz, double a[], int colidx[], int rowstr[], int firstrow, int lastrow, int firstcol, int lastcol, int arow[], int acol[][9], double aelt[][9], int iv[]) {
int iouter, ivelt, nzv, nn1;
int ivc[9];
double vc[9];
//---------------------------------------------------------------------
// nonzer is approximately (int(sqrt(nnza /n)));
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// nn1 is the smallest power of two not less than n
//---------------------------------------------------------------------
nn1 = 1;
do {
nn1 = 2 * nn1;
}
while (nn1 < n);
//---------------------------------------------------------------------
// Generate nonzero positions and save for the use in sparse.
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess ivc use : RWR
****************************************/
for(iouter = 0; iouter < n; iouter++) {
nzv = 8;
sprnvc(n, nzv, nn1, vc, ivc);
vecset(n, vc, ivc, &nzv, iouter + 1, 0.5);
arow[iouter] = nzv;
/* Copy the generated (index, value) pairs for this row, switching the
 * 1-based indices produced above to 0-based. */
#pragma omp parallel for default(shared) private(ivelt) firstprivate(nzv, iouter, ivc, vc)
for(ivelt = 0; ivelt < nzv; ivelt++) {
acol[iouter][ivelt] = ivc[ivelt] - 1;
aelt[iouter][ivelt] = vc[ivelt];
}
}
//---------------------------------------------------------------------
// ... make the sparse matrix from list of elements with duplicates
// (iv is used as workspace)
//---------------------------------------------------------------------
sparse(a, colidx, rowstr, n, nz, 8, arow, acol, aelt, firstrow, lastrow, iv, 1.0e-1, 12.0);
}
//---------------------------------------------------------------------
// Assemble a CSR (compressed sparse row) matrix (a, colidx, rowstr)
// from per-row triple lists (arow, acol, aelt), summing duplicated
// entries and adding rcond - shift on the diagonal.
//
// rows range from firstrow to lastrow
// the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
//
// a      - out: nonzero values (capacity nz)
// colidx - out: column index of each stored nonzero (-1 = empty slot)
// rowstr - out: row start offsets, nrows+1 entries
// nzloc  - workspace: per-row duplicate counts, later prefix-summed and
//          used to squeeze out unused slots
//---------------------------------------------------------------------
void sparse(double a[], int colidx[], int rowstr[], int n, int nz, int nozer, int arow[], int acol[][9], double aelt[][9], int firstrow, int lastrow, int nzloc[], double rcond, double shift) {
int nrows;
//---------------------------------------------------
// generate a sparse matrix from a list of
// [col, row, element] tri
//---------------------------------------------------
int i, j, j1, j2, nza, k, kk, nzrow, jcol;
double size, scale, ratio, va;
int cont40;
//---------------------------------------------------------------------
// how many rows of result
//---------------------------------------------------------------------
nrows = lastrow - firstrow + 1;
//---------------------------------------------------------------------
// ...count the number of triples in each row
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j) firstprivate(nrows)
for(j = 0; j < nrows + 1; j++) {
rowstr[j] = 0;
}
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
for(i = 0; i < n; i++) {
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
// Reserve arow[i] slots per triple: an upper bound on the row length
// after the outer-product expansion below (duplicates collapse later).
for(nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza] + 1;
rowstr[j] = rowstr[j] + arow[i];
}
}
rowstr[0] = 0;
/*************** Clava msgError **************
unsolved dependency for arrayAccess rowstr use : RW
****************************************/
// Prefix-sum the counts so rowstr[j] becomes the first slot of row j.
for(j = 1; j < nrows + 1; j++) {
rowstr[j] = rowstr[j] + rowstr[j - 1];
}
nza = rowstr[nrows] - 1;
//---------------------------------------------------------------------
// ... rowstr(j) now is the location of the first nonzero
// of row j of a
//---------------------------------------------------------------------
if(nza > nz) {
printf("Space for matrix elements exceeded in sparse\n");
printf("nza, nzmax = %d, %d\n", nza, nz);
exit(1);
}
//---------------------------------------------------------------------
// ... preload data pages
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess a use : W
unsolved dependency for arrayAccess colidx use : W
****************************************/
// Zero the value array and mark all column slots empty (-1); also reset
// each row's duplicate counter.
for(j = 0; j < nrows; j++) {
#pragma omp parallel for default(shared) private(k) firstprivate(j, rowstr)
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
a[k] = 0.0;
colidx[k] = -1;
}
nzloc[j] = 0;
}
//---------------------------------------------------------------------
// ... generate actual values by summing duplicates
//---------------------------------------------------------------------
size = 1.0;
// Geometric decay so the outer products span rcond orders of magnitude:
// after n rows, size = rcond.
ratio = pow(rcond, (1.0 / (double) (n)));
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
for(i = 0; i < n; i++) {
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
// Scatter the scaled outer product of row i's sparse vector with itself.
for(nza = 0; nza < arow[i]; nza++) {
j = acol[i][nza];
scale = size * aelt[i][nza];
/*************** Clava msgError **************
Loop contains Invalid Statement -> exit#874
****************************************/
for(nzrow = 0; nzrow < arow[i]; nzrow++) {
jcol = acol[i][nzrow];
va = aelt[i][nzrow] * scale;
//--------------------------------------------------------------------
// ... add the identity * rcond to the generated matrix to bound
// the smallest eigenvalue from below by rcond
//--------------------------------------------------------------------
if(jcol == j && j == i) {
va = va + rcond - shift;
}
// Insertion sort jcol into row j's slot range, keeping colidx ordered;
// cont40 records that a slot (new or duplicate) was found.
cont40 = 0;
/*************** Clava msgError **************
Loop contains Invalid Statement -> BreakStmt#852
****************************************/
for(k = rowstr[j]; k < rowstr[j + 1]; k++) {
if(colidx[k] > jcol) {
//----------------------------------------------------------------
// ... insert colidx here orderly
//----------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess colidx use : RW
unsolved dependency for arrayAccess a use : RW
****************************************/
// Shift occupied slots right by one to open a gap at position k.
for(kk = rowstr[j + 1] - 2; kk >= k; kk--) {
if(colidx[kk] > -1) {
a[kk + 1] = a[kk];
colidx[kk + 1] = colidx[kk];
}
}
colidx[k] = jcol;
a[k] = 0.0;
cont40 = 1;
break;
}
else if(colidx[k] == -1) {
colidx[k] = jcol;
cont40 = 1;
break;
}
else if(colidx[k] == jcol) {
//--------------------------------------------------------------
// ... mark the duplicated entry
//--------------------------------------------------------------
nzloc[j] = nzloc[j] + 1;
cont40 = 1;
break;
}
}
if(cont40 == 0) {
printf("internal error in sparse: i=%d\n", i);
exit(1);
}
// k still indexes the slot chosen above; accumulate (sums duplicates).
a[k] = a[k] + va;
}
}
size = size * ratio;
}
//---------------------------------------------------------------------
// ... remove empty entries and generate final results
//---------------------------------------------------------------------
/*************** Clava msgError **************
unsolved dependency for arrayAccess nzloc use : RW
****************************************/
// Prefix-sum duplicate counts: nzloc[j] = total slots wasted up to row j.
for(j = 1; j < nrows; j++) {
nzloc[j] = nzloc[j] + nzloc[j - 1];
}
/*************** Clava msgError **************
unsolved dependency for arrayAccess a use : RW
unsolved dependency for arrayAccess colidx use : RW
****************************************/
// Compact each row leftward by the cumulative duplicate count.
for(j = 0; j < nrows; j++) {
if(j > 0) {
j1 = rowstr[j] - nzloc[j - 1];
}
else {
j1 = 0;
}
j2 = rowstr[j + 1] - nzloc[j];
nza = rowstr[j];
/*************** Clava msgError **************
Variable nza could not be categorized into any OpenMP Variable Scopeuse : RW
****************************************/
for(k = j1; k < j2; k++) {
a[k] = a[nza];
colidx[k] = colidx[nza];
nza = nza + 1;
}
}
#pragma omp parallel for default(shared) private(j) firstprivate(nrows, nzloc)
for(j = 1; j < nrows + 1; j++) {
rowstr[j] = rowstr[j] - nzloc[j - 1];
}
// NOTE(review): this final nza recomputation appears unused by callers
// (void return, nza is local) -- confirm before removing.
nza = rowstr[nrows] - 1;
}
//---------------------------------------------------------------------
// Generate a sparse n-vector (v, iv) with nz nonzeros at distinct
// random positions in [1, n].  Positions are drawn via the shared
// linear congruential stream (tran/amult); out-of-range or repeated
// positions are rejected and redrawn, as in the reference code.
//---------------------------------------------------------------------
void sprnvc(int n, int nz, int nn1, double v[], int iv[]) {
    int filled = 0;

    while (filled < nz) {
        // Draw the value first, then the position, so the sequence of
        // randlc() calls matches the reference ordering exactly.
        double val = randlc(&tran, amult);
        double loc = randlc(&tran, amult);
        int idx = icnvrt(loc, nn1) + 1;   // integer in [1, nn1]

        if (idx > n) continue;            // outside [1, n]: redraw

        // Reject a position that was already generated.
        int duplicate = 0;
        for (int ii = 0; ii < filled && !duplicate; ii++) {
            duplicate = (iv[ii] == idx);
        }
        if (duplicate) continue;

        v[filled] = val;
        iv[filled] = idx;
        filled++;
    }
}
//---------------------------------------------------------------------
// Scale a double x in (0,1) by the power of two ipwr2 and truncate
// the product toward zero, yielding an integer in [0, ipwr2).
//---------------------------------------------------------------------
int icnvrt(double x, int ipwr2) {
    return (int) (x * ipwr2);
}
//---------------------------------------------------------------------
// Set element i of the sparse vector (v, iv) holding *nzv nonzeros to
// val.  If index i is not yet present, append it and bump *nzv.
//---------------------------------------------------------------------
void vecset(int n, double v[], int iv[], int *nzv, int i, double val) {
    int found = 0;

    // Scan the full list (not stopping at the first hit), overwriting
    // every stored entry whose index equals i -- same as the reference.
    for (int k = 0; k < *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            found = 1;
        }
    }

    // Index i was absent: append a brand-new nonzero.
    if (!found) {
        v[*nzv] = val;
        iv[*nzv] = i;
        *nzv += 1;
    }
}
double randlc(double *x, double a) {
    //--------------------------------------------------------------------
    // NPB linear congruential generator (D. H. Bailey, 1990):
    //
    //     x_{k+1} = a * x_k (mod 2^46)
    //
    // *x is the seed (an odd integer-valued double in (1, 2^46)) and is
    // updated in place; the return value is the new seed scaled by 2^-46,
    // i.e. a uniform deviate in (0, 1).  All arithmetic is exact in
    // doubles with >= 48 mantissa bits: the 46-bit product is formed from
    // 23-bit halves so no rounding ever occurs.
    //--------------------------------------------------------------------
    double const r23 = 1.1920928955078125e-07;  // 2^-23
    double const r46 = r23 * r23;               // 2^-46
    double const t23 = 8.388608e+06;            // 2^23
    double const t46 = t23 * t23;               // 2^46

    // Split the multiplier: a = 2^23 * hi_a + lo_a.
    double hi_a = (int) (r23 * a);
    double lo_a = a - t23 * hi_a;

    // Split the seed: x = 2^23 * hi_x + lo_x.
    double hi_x = (int) (r23 * (*x));
    double lo_x = *x - t23 * hi_x;

    // Middle partial product hi_a*lo_x + lo_a*hi_x, reduced mod 2^23...
    double mid = hi_a * lo_x + lo_a * hi_x;
    double carry = (int) (r23 * mid);
    double z = mid - t23 * carry;

    // ...then x' = 2^23 * z + lo_a * lo_x (mod 2^46).
    double full = t23 * z + lo_a * lo_x;
    double wrap = (int) (r46 * full);
    *x = full - t46 * wrap;

    // Normalize the updated seed into (0, 1).
    return r46 * (*x);
}
void vranlc(int n, double *x, double a, double y[]) {
    //--------------------------------------------------------------------
    // Vector form of randlc(): fills y[0..n-1] with n uniform deviates in
    // (0, 1) from the linear congruential generator
    //
    //     x_{k+1} = a * x_k (mod 2^46)
    //
    // *x is the seed and is left holding the seed after the n-th draw, so
    // subsequent calls continue the same sequence.  n == 0 performs only
    // the multiplier split; x, a and y are otherwise untouched.  Exact in
    // doubles with >= 48 mantissa bits (23-bit half products).
    //--------------------------------------------------------------------
    double const r23 = 1.1920928955078125e-07;  // 2^-23
    double const r46 = r23 * r23;               // 2^-46
    double const t23 = 8.388608e+06;            // 2^23
    double const t46 = t23 * t23;               // 2^46

    // Split the multiplier once, outside the loop: a = 2^23*a_hi + a_lo.
    double a_hi = (int) (r23 * a);
    double a_lo = a - t23 * a_hi;

    // Scalar recurrence on *x; inherently sequential.
    for (int i = 0; i < n; i++) {
        // Split the seed: x = 2^23 * x_hi + x_lo.
        double x_hi = (int) (r23 * (*x));
        double x_lo = *x - t23 * x_hi;

        // Middle partial product mod 2^23, then recombine mod 2^46.
        double mid = a_hi * x_lo + a_lo * x_hi;
        double carry = (int) (r23 * mid);
        double z = mid - t23 * carry;
        double full = t23 * z + a_lo * x_lo;
        double wrap = (int) (r46 * full);
        *x = full - t46 * wrap;

        y[i] = r46 * (*x);  // normalized deviate in (0, 1)
    }
}
// Store in *t the wall-clock time in seconds, with microsecond
// resolution, measured relative to the whole-second mark of the first
// call (keeps the subtraction exact in a double).
void wtime(double *t) {
    static int base_sec = -1;  // seconds value captured on the first call
    struct timeval now;
    gettimeofday(&now, (void *) 0);
    if (base_sec < 0) {
        base_sec = now.tv_sec;
    }
    *t = (double) (now.tv_sec - base_sec) + 1.0e-6 * now.tv_usec;
}
/*****************************************************************/
/******         E L A P S E D _ T I M E              ******/
/*****************************************************************/
// Convenience wrapper: return the current wall-clock reading from the
// gettimeofday-based clock in wtime().
double elapsed_time() {
    double now;
    wtime(&now);
    return now;
}
/*****************************************************************/
/******            T I M E R _ C L E A R             ******/
/*****************************************************************/
// Reset the accumulated time of timer slot n (file-global elapsed[])
// to zero.
void timer_clear(int n) {
elapsed[n] = 0.0;
}
/*****************************************************************/
/******            T I M E R _ S T A R T             ******/
/*****************************************************************/
// Record the current wall-clock time as the start timestamp of timer
// slot n (file-global start[]).
void timer_start(int n) {
start[n] = elapsed_time();
}
/*****************************************************************/
/******            T I M E R _ S T O P               ******/
/*****************************************************************/
// Add the wall-clock interval since the matching timer_start(n) to
// timer slot n's running total (file-globals start[] and elapsed[]).
void timer_stop(int n) {
    elapsed[n] += elapsed_time() - start[n];
}
/*****************************************************************/
/******            T I M E R _ R E A D               ******/
/*****************************************************************/
// Return the total accumulated time (seconds) of timer slot n.
double timer_read(int n) {
return (elapsed[n]);
}
// Print the standard NPB result summary (problem class and size,
// iteration count, timing, Mop/s rate, operation type and verification
// status) to stdout.  For non-grid benchmarks n2 and n3 are zero and n1
// alone encodes the size; EP is special-cased because its size is 2^n1
// and must print without a trailing ".0".
void print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int verified) {
    printf("\n\n %s Benchmark Completed.\n", name);
    printf(" Class = %12c\n", class);
    if (n2 != 0 || n3 != 0) {
        // Grid-based problem: report the full n1 x n2 x n3 extent.
        printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
    } else if (name[0] == 'E' && name[1] == 'P') {
        // EP: format 2^n1 as an integer, stripping a trailing '.'.
        char size[16];
        sprintf(size, "%15.0lf", pow(2.0, n1));
        int last = 14;
        if (size[last] == '.') {
            size[last] = ' ';
            last--;
        }
        size[last + 1] = '\0';
        printf(" Size = %15s\n", size);
    } else {
        printf(" Size = %12d\n", n1);
    }
    printf(" Iterations = %12d\n", niter);
    printf(" Time in seconds = %12.2lf\n", t);
    printf(" Mop/s total = %15.2lf\n", mops);
    printf(" Operation type = %24s\n", optype);
    if (verified) {
        printf(" Verification = %12s\n", "SUCCESSFUL");
    } else {
        printf(" Verification = %12s\n", "UNSUCCESSFUL");
    }
}
|
convolutiondepthwise_3x3_pack4_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convdw3x3s1_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const unsigned short* k0 = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out.row<unsigned short>(0);
unsigned short* outptr1 = out.row<unsigned short>(1);
const Mat img0 = bottom_blob.channel(g);
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
const unsigned short* r3 = img0.row<const unsigned short>(3);
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+32), 16));
int i = 0;
#if __aarch64__
for (; i+1 < outh; i+=2)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n"// r10 r11 r12 r13
"mov v16.16b, %21.16b \n"// sum00
"mov v17.16b, %21.16b \n"// sum01
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v28.4h, v29.4h}, [%3] \n"// r14 r15
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"mov v18.16b, %21.16b \n"// sum02
"mov v19.16b, %21.16b \n"// sum03
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"mov v20.16b, %21.16b \n"// sum10
"fmla v16.4s, %15.4s, v10.4s \n"
"fmla v17.4s, %15.4s, v11.4s \n"
"mov v21.16b, %21.16b \n"// sum11
"fmla v18.4s, %15.4s, v12.4s \n"
"fmla v19.4s, %15.4s, v13.4s \n"
"mov v22.16b, %21.16b \n"// sum12
"fmla v20.4s, %12.4s, v10.4s \n"
"fmla v21.4s, %12.4s, v11.4s \n"
"mov v23.16b, %21.16b \n"// sum13
"fmla v22.4s, %12.4s, v12.4s \n"
"fmla v23.4s, %12.4s, v13.4s \n"
"shll v28.4s, v28.4h, #16 \n"
"fmla v16.4s, %16.4s, v11.4s \n"
"fmla v17.4s, %16.4s, v12.4s \n"
"shll v29.4s, v29.4h, #16 \n"
"fmla v18.4s, %16.4s, v13.4s \n"
"fmla v19.4s, %16.4s, v28.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%4], #32 \n"// r20 r21 r22 r23
"fmla v20.4s, %13.4s, v11.4s \n"
"fmla v21.4s, %13.4s, v12.4s \n"
"fmla v22.4s, %13.4s, v13.4s \n"
"fmla v23.4s, %13.4s, v28.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v14.4h, v15.4h}, [%4] \n"// r24 r25
"fmla v16.4s, %17.4s, v12.4s \n"
"fmla v17.4s, %17.4s, v13.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v18.4s, %17.4s, v28.4s \n"
"fmla v19.4s, %17.4s, v29.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v20.4s, %14.4s, v12.4s \n"
"fmla v21.4s, %14.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2], #32 \n"// r00 r01 r02 r03
"fmla v22.4s, %14.4s, v28.4s \n"
"fmla v23.4s, %14.4s, v29.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v16.4s, %18.4s, v24.4s \n"
"fmla v17.4s, %18.4s, v25.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v18.4s, %18.4s, v26.4s \n"
"fmla v19.4s, %18.4s, v27.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%5], #32 \n"// r30 r31 r32 r33
"fmla v20.4s, %15.4s, v24.4s \n"
"fmla v21.4s, %15.4s, v25.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"fmla v22.4s, %15.4s, v26.4s \n"
"fmla v23.4s, %15.4s, v27.4s \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v16.4s, %19.4s, v25.4s \n"
"fmla v17.4s, %19.4s, v26.4s \n"
"fmla v18.4s, %19.4s, v27.4s \n"
"fmla v19.4s, %19.4s, v14.4s \n"
"fmla v20.4s, %16.4s, v25.4s \n"
"fmla v21.4s, %16.4s, v26.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v24.4h, v25.4h}, [%2] \n"// r04 r05
"fmla v22.4s, %16.4s, v27.4s \n"
"fmla v23.4s, %16.4s, v14.4s \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, %20.4s, v26.4s \n"
"fmla v17.4s, %20.4s, v27.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"fmla v18.4s, %20.4s, v14.4s \n"
"fmla v19.4s, %20.4s, v15.4s \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v20.4s, %17.4s, v26.4s \n"
"fmla v21.4s, %17.4s, v27.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v26.4h, v27.4h}, [%5] \n"// r34 r35
"fmla v22.4s, %17.4s, v14.4s \n"
"fmla v23.4s, %17.4s, v15.4s \n"
"shll v28.4s, v28.4h, #16 \n"
"fmla v16.4s, %12.4s, v10.4s \n"
"fmla v17.4s, %12.4s, v11.4s \n"
"shll v29.4s, v29.4h, #16 \n"
"fmla v18.4s, %12.4s, v12.4s \n"
"fmla v19.4s, %12.4s, v13.4s \n"
"shll v30.4s, v30.4h, #16 \n"
"fmla v20.4s, %18.4s, v28.4s \n"
"fmla v21.4s, %18.4s, v29.4s \n"
"shll v31.4s, v31.4h, #16 \n"
"fmla v22.4s, %18.4s, v30.4s \n"
"fmla v23.4s, %18.4s, v31.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v16.4s, %13.4s, v11.4s \n"
"fmla v17.4s, %13.4s, v12.4s \n"
"fmla v18.4s, %13.4s, v13.4s \n"
"fmla v19.4s, %13.4s, v24.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v20.4s, %19.4s, v29.4s \n"
"fmla v21.4s, %19.4s, v30.4s \n"
"fmla v22.4s, %19.4s, v31.4s \n"
"fmla v23.4s, %19.4s, v26.4s \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v16.4s, %14.4s, v12.4s \n"
"fmla v17.4s, %14.4s, v13.4s \n"
"fmla v18.4s, %14.4s, v24.4s \n"
"fmla v19.4s, %14.4s, v25.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v20.4s, %20.4s, v30.4s \n"
"fmla v21.4s, %20.4s, v31.4s \n"
"fmla v22.4s, %20.4s, v26.4s \n"
"fmla v23.4s, %20.4s, v27.4s \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
"st1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%1], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; j+1 < outw; j+=2)
{
asm volatile(
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3] \n"// r10 r11 r12 r13
"mov v16.16b, %21.16b \n"// sum00
"mov v17.16b, %21.16b \n"// sum01
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"mov v18.16b, %21.16b \n"// sum10
"mov v19.16b, %21.16b \n"// sum11
"fmla v16.4s, %15.4s, v10.4s \n"
"fmla v17.4s, %15.4s, v11.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"fmla v18.4s, %12.4s, v10.4s \n"
"fmla v19.4s, %12.4s, v11.4s \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v16.4s, %16.4s, v11.4s \n"
"fmla v17.4s, %16.4s, v12.4s \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%4] \n"// r20 r21 r22 r23
"fmla v18.4s, %13.4s, v11.4s \n"
"fmla v19.4s, %13.4s, v12.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"fmla v16.4s, %17.4s, v12.4s \n"
"fmla v17.4s, %17.4s, v13.4s \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v18.4s, %14.4s, v12.4s \n"
"fmla v19.4s, %14.4s, v13.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"fmla v16.4s, %18.4s, v20.4s \n"
"fmla v17.4s, %18.4s, v21.4s \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v18.4s, %15.4s, v20.4s \n"
"fmla v19.4s, %15.4s, v21.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%2] \n"// r00 r01 r02 r03
"fmla v16.4s, %19.4s, v21.4s \n"
"fmla v17.4s, %19.4s, v22.4s \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5] \n"// r30 r31 r32 r33
"fmla v18.4s, %16.4s, v21.4s \n"
"fmla v19.4s, %16.4s, v22.4s \n"
"shll v10.4s, v10.4h, #16 \n"
"fmla v16.4s, %20.4s, v22.4s \n"
"fmla v17.4s, %20.4s, v23.4s \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v18.4s, %17.4s, v22.4s \n"
"fmla v19.4s, %17.4s, v23.4s \n"
"shll v11.4s, v11.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v16.4s, %12.4s, v10.4s \n"
"fmla v17.4s, %12.4s, v11.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"fmla v18.4s, %18.4s, v24.4s \n"
"fmla v19.4s, %18.4s, v25.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v16.4s, %13.4s, v11.4s \n"
"fmla v17.4s, %13.4s, v12.4s \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v18.4s, %19.4s, v25.4s \n"
"fmla v19.4s, %19.4s, v26.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v16.4s, %14.4s, v12.4s \n"
"fmla v17.4s, %14.4s, v13.4s \n"
"add %3, %3, #16 \n"
"fmla v18.4s, %20.4s, v26.4s \n"
"fmla v19.4s, %20.4s, v27.4s \n"
"add %4, %4, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"add %2, %2, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"add %5, %5, #16 \n"
"st1 {v16.4h, v17.4h}, [%0], #16 \n"
"st1 {v18.4h, v19.4h}, [%1], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
for (; j < outw; j++)
{
asm volatile(
"prfm pldl1keep, [%3, #192] \n"
"ld1 {v10.4h, v11.4h, v12.4h}, [%3] \n"// r10 r11 r12
"mov v18.16b, %21.16b \n"// sum0
"mov v19.16b, %21.16b \n"// sum1
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmul v16.4s, %15.4s, v10.4s \n"
"fmul v17.4s, %12.4s, v10.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"fmla v18.4s, %16.4s, v11.4s \n"
"fmla v19.4s, %13.4s, v11.4s \n"
"prfm pldl1keep, [%4, #192] \n"
"ld1 {v20.4h, v21.4h, v22.4h}, [%4] \n"// r20 r21 r22
"fmla v16.4s, %17.4s, v12.4s \n"
"fmla v17.4s, %14.4s, v12.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v18.4s, %18.4s, v20.4s \n"
"fmla v19.4s, %15.4s, v20.4s \n"
"prfm pldl1keep, [%2, #192] \n"
"ld1 {v10.4h, v11.4h, v12.4h}, [%2] \n"// r00 r01 r02
"shll v22.4s, v22.4h, #16 \n"
"prfm pldl1keep, [%5, #192] \n"
"ld1 {v24.4h, v25.4h, v26.4h}, [%5] \n"// r30 r31 r32
"fmla v16.4s, %19.4s, v21.4s \n"
"fmla v17.4s, %16.4s, v21.4s \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v24.4s, v24.4h, #16 \n"
"fmla v18.4s, %20.4s, v22.4s \n"
"fmla v19.4s, %17.4s, v22.4s \n"
"shll v11.4s, v11.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v16.4s, %12.4s, v10.4s \n"
"fmla v17.4s, %18.4s, v24.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v18.4s, %13.4s, v11.4s \n"
"fmla v19.4s, %19.4s, v25.4s \n"
"add %3, %3, #8 \n"
"fmla v16.4s, %14.4s, v12.4s \n"
"fmla v17.4s, %20.4s, v26.4s \n"
"add %4, %4, #8 \n"
"fadd v18.4s, v18.4s, v16.4s \n"
"fadd v19.4s, v19.4s, v17.4s \n"
"add %2, %2, #8 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"add %5, %5, #8 \n"
"st1 {v18.4h}, [%0], #8 \n"
"st1 {v19.4h}, [%1], #8 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"w"(_k00), // %12
"w"(_k01), // %13
"w"(_k02), // %14
"w"(_k10), // %15
"w"(_k11), // %16
"w"(_k12), // %17
"w"(_k20), // %18
"w"(_k21), // %19
"w"(_k22), // %20
"w"(_bias0) // %21
: "memory", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v24", "v25", "v26"
);
}
r0 += 2 * 4 + w * 4;
r1 += 2 * 4 + w * 4;
r2 += 2 * 4 + w * 4;
r3 += 2 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
#endif // __aarch64__
for (; i < outh; i++)
{
int j = 0;
for (; j+3 < outw; j+=4)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n"// r00 r01 r02 r03
"mov v16.16b, %17.16b \n"// sum00
"mov v17.16b, %17.16b \n"// sum01
"mov v18.16b, %17.16b \n"// sum02
"mov v19.16b, %17.16b \n"// sum03
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, %8.4s, v10.4s \n"
"fmla v17.4s, %8.4s, v11.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v18.4s, %8.4s, v12.4s \n"
"fmla v19.4s, %8.4s, v13.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v14.4h, v15.4h}, [%1] \n"// r04 r05
"fmla v16.4s, %9.4s, v11.4s \n"
"fmla v17.4s, %9.4s, v12.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"fmla v18.4s, %9.4s, v13.4s \n"
"fmla v19.4s, %9.4s, v14.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"// r10 r11 r12 r13
"fmla v16.4s, %10.4s, v12.4s \n"
"fmla v17.4s, %10.4s, v13.4s \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v18.4s, %10.4s, v14.4s \n"
"fmla v19.4s, %10.4s, v15.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v16.4s, %11.4s, v20.4s \n"
"fmla v17.4s, %11.4s, v21.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v18.4s, %11.4s, v22.4s \n"
"fmla v19.4s, %11.4s, v23.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v14.4h, v15.4h}, [%2] \n"// r14 r15
"fmla v16.4s, %12.4s, v21.4s \n"
"fmla v17.4s, %12.4s, v22.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"fmla v18.4s, %12.4s, v23.4s \n"
"fmla v19.4s, %12.4s, v14.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n"// r20 r21 r22 r23
"fmla v16.4s, %13.4s, v22.4s \n"
"fmla v17.4s, %13.4s, v23.4s \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v18.4s, %13.4s, v14.4s \n"
"fmla v19.4s, %13.4s, v15.4s \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, %14.4s, v10.4s \n"
"fmla v17.4s, %14.4s, v11.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v18.4s, %14.4s, v12.4s \n"
"fmla v19.4s, %14.4s, v13.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v14.4h, v15.4h}, [%3] \n"// r24 r25
"fmla v16.4s, %15.4s, v11.4s \n"
"fmla v17.4s, %15.4s, v12.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"fmla v18.4s, %15.4s, v13.4s \n"
"fmla v19.4s, %15.4s, v14.4s \n"
"fmla v16.4s, %16.4s, v12.4s \n"
"fmla v17.4s, %16.4s, v13.4s \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v18.4s, %16.4s, v14.4s \n"
"fmla v19.4s, %16.4s, v15.4s \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"pld [%1, #128] \n"
"vld1.u16 {d30-d31}, [%1 :64]! \n"// r00 r01
"vmov q10, %q17 \n"// sum00
"vmov q11, %q17 \n"// sum01
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q8, q14 \n"
"vmla.f32 q11, %q8, q15 \n"
"vmla.f32 q10, %q9, q15 \n"
"pld [%1, #128] \n"
"vld1.u16 {d30-d31}, [%1 :64]! \n"// r02 r03
"vmov q12, %q17 \n"// sum02
"vmov q13, %q17 \n"// sum03
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q12, %q8, q14 \n"
"vmla.f32 q11, %q9, q14 \n"
"vmla.f32 q13, %q8, q15 \n"
"vmla.f32 q10, %q10, q14 \n"
"vmla.f32 q12, %q9, q15 \n"
"vmla.f32 q11, %q10, q15 \n"
// "pld [%1, #128] \n"
"vld1.u16 {d30-d31}, [%1 :64] \n"// r04 r05
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q13, %q9, q14 \n"
"vmla.f32 q12, %q10, q14 \n"
"vmla.f32 q13, %q10, q15 \n"
"pld [%2, #128] \n"
"vld1.u16 {d30-d31}, [%2 :64]! \n"// r10 r11
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q11, q14 \n"
"vmla.f32 q11, %q11, q15 \n"
"vmla.f32 q10, %q12, q15 \n"
"pld [%2, #128] \n"
"vld1.u16 {d30-d31}, [%2 :64]! \n"// r12 r13
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q12, %q11, q14 \n"
"vmla.f32 q11, %q12, q14 \n"
"vmla.f32 q13, %q11, q15 \n"
"vmla.f32 q10, %q13, q14 \n"
"vmla.f32 q12, %q12, q15 \n"
"vmla.f32 q11, %q13, q15 \n"
// "pld [%2, #128] \n"
"vld1.u16 {d30-d31}, [%2 :64] \n"// r14 r15
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q13, %q12, q14 \n"
"vmla.f32 q12, %q13, q14 \n"
"vmla.f32 q13, %q13, q15 \n"
"pld [%3, #128] \n"
"vld1.u16 {d30-d31}, [%3 :64]! \n"// r20 r21
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q14, q14 \n"
"vmla.f32 q11, %q14, q15 \n"
"vmla.f32 q10, %q15, q15 \n"
"pld [%3, #128] \n"
"vld1.u16 {d30-d31}, [%3 :64]! \n"// r22 r23
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q12, %q14, q14 \n"
"vmla.f32 q11, %q15, q14 \n"
"vmla.f32 q13, %q14, q15 \n"
"vmla.f32 q10, %q16, q14 \n"
"vmla.f32 q12, %q15, q15 \n"
"vmla.f32 q11, %q16, q15 \n"
// "pld [%3, #128] \n"
"vld1.u16 {d30-d31}, [%3 :64] \n"// r24 r25
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q13, %q15, q14 \n"
"vmla.f32 q12, %q16, q14 \n"
"vmla.f32 q13, %q16, q15 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vshrn.u32 d22, q12, #16 \n"
"vshrn.u32 d23, q13, #16 \n"
"vst1.u16 {d20-d23}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; j+1 < outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%1] \n"// r00 r01 r02 r03
"mov v18.16b, %17.16b \n"// sum00
"mov v19.16b, %17.16b \n"// sum01
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmul v16.4s, %8.4s, v12.4s \n"
"fmul v17.4s, %8.4s, v13.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v18.4s, %9.4s, v13.4s \n"
"fmla v19.4s, %9.4s, v14.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2] \n"// r10 r11 r12 r13
"fmla v16.4s, %10.4s, v14.4s \n"
"fmla v17.4s, %10.4s, v15.4s \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"fmla v18.4s, %11.4s, v20.4s \n"
"fmla v19.4s, %11.4s, v21.4s \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v16.4s, %12.4s, v21.4s \n"
"fmla v17.4s, %12.4s, v22.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%3] \n"// r20 r21 r22 r23
"fmla v18.4s, %13.4s, v22.4s \n"
"fmla v19.4s, %13.4s, v23.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v16.4s, %14.4s, v12.4s \n"
"fmla v17.4s, %14.4s, v13.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v18.4s, %15.4s, v13.4s \n"
"fmla v19.4s, %15.4s, v14.4s \n"
"add %1, %1, #16 \n"
"fmla v16.4s, %16.4s, v14.4s \n"
"fmla v17.4s, %16.4s, v15.4s \n"
"add %2, %2, #16 \n"
"fadd v18.4s, v18.4s, v16.4s \n"
"fadd v19.4s, v19.4s, v17.4s \n"
"add %3, %3, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v18.4h, v19.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.u16 {d28-d31}, [%1 :64] \n"// r00 r01 r02 r03
"vmov q10, %q17 \n"// sum00
"vmov q11, %q17 \n"// sum01
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q8, q12 \n"
"vmla.f32 q11, %q8, q13 \n"
"vshll.u16 q14, d30, #16 \n"
"vmla.f32 q10, %q9, q13 \n"
"vmla.f32 q11, %q9, q14 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q10, q14 \n"
"vmla.f32 q11, %q10, q15 \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2 :64] \n"// r10 r11 r12 r13
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q11, q12 \n"
"vmla.f32 q11, %q11, q13 \n"
"vshll.u16 q14, d30, #16 \n"
"vmla.f32 q10, %q12, q13 \n"
"vmla.f32 q11, %q12, q14 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q13, q14 \n"
"vmla.f32 q11, %q13, q15 \n"
"pld [%3, #256] \n"
"vld1.u16 {d28-d31}, [%3 :64] \n"// r20 r21 r22 r23
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q14, q12 \n"
"vmla.f32 q11, %q14, q13 \n"
"vshll.u16 q14, d30, #16 \n"
"vmla.f32 q10, %q15, q13 \n"
"vmla.f32 q11, %q15, q14 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q10, %q16, q14 \n"
"vmla.f32 q11, %q16, q15 \n"
"add %1, %1, #16 \n"
"add %2, %2, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"add %3, %3, #16 \n"
"vst1.u16 {d20-d21}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
float32x4_t _r00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0+4), 16));
float32x4_t _r02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0+8), 16));
float32x4_t _r10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1+4), 16));
float32x4_t _r12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1+8), 16));
float32x4_t _r20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
float32x4_t _r21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2+4), 16));
float32x4_t _r22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2+8), 16));
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
vst1_u16(outptr0, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 4;
}
r0 += 2*4;
r1 += 2*4;
r2 += 2*4;
}
}
}
// Depthwise 3x3 convolution, stride 2, over pack4 bf16 data (NEON).
// Each unsigned short lane stores the high 16 bits of a float32 (bf16):
// inputs and weights are widened to f32 with a 16-bit left shift
// (shll / vshll.u16) before the multiply-accumulates, and results are
// narrowed back to bf16 with a 16-bit right shift (shrn / vshrn.u32).
// One channel group (4 packed lanes) is processed per OpenMP iteration.
static void convdw3x3s2_pack4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
// u16 elements to skip after each output row: stride 2 consumes 2*outw of
// the w input columns, plus one whole extra input row; x4 for pack4 lanes.
const int tailstep = (w - 2*outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g=0; g<group; g++)
{
Mat out = top_blob.channel(g);
// bias is optional; a null bias behaves as all zeros
float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + g * 4) : vdupq_n_f32(0.f);
const unsigned short* k0 = kernel.row<const unsigned short>(g);
unsigned short* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
// three consecutive input rows feeding one output row
const unsigned short* r0 = img0.row<const unsigned short>(0);
const unsigned short* r1 = img0.row<const unsigned short>(1);
const unsigned short* r2 = img0.row<const unsigned short>(2);
// 3x3 kernel taps, widened from bf16 to f32 once per group
float32x4_t _k00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0), 16));
float32x4_t _k01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+4), 16));
float32x4_t _k02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+8), 16));
float32x4_t _k10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+12), 16));
float32x4_t _k11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+16), 16));
float32x4_t _k12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+20), 16));
float32x4_t _k20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+24), 16));
float32x4_t _k21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+28), 16));
float32x4_t _k22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(k0+32), 16));
int i = 0;
for (; i < outh; i++)
{
int j = 0;
#if __aarch64__
// main loop: 4 output pixels per iteration (aarch64 only; needs v10-v31)
for (; j+3 < outw; j+=4)
{
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n"// r00 r01 r02 r03
"mov v28.16b, %17.16b \n"// sum00
"mov v29.16b, %17.16b \n"// sum01
"mov v30.16b, %17.16b \n"// sum02
"mov v31.16b, %17.16b \n"// sum03
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%1], #32 \n"// r04 r05 r06 r07
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v18.4h}, [%1] \n"// r08
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v28.4s, %8.4s, v10.4s \n"
"fmla v29.4s, %8.4s, v12.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, %8.4s, v14.4s \n"
"fmla v31.4s, %8.4s, v16.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, %9.4s, v11.4s \n"
"fmla v29.4s, %9.4s, v13.4s \n"
"fmla v30.4s, %9.4s, v15.4s \n"
"fmla v31.4s, %9.4s, v17.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%2], #32 \n"// r10 r11 r12 r13
"fmla v28.4s, %10.4s, v12.4s \n"
"fmla v29.4s, %10.4s, v14.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, %10.4s, v16.4s \n"
"fmla v31.4s, %10.4s, v18.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%2], #32 \n"// r14 r15 r16 r17
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v19.4h}, [%2] \n"// r18
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"fmla v28.4s, %11.4s, v20.4s \n"
"fmla v29.4s, %11.4s, v22.4s \n"
"shll v26.4s, v26.4h, #16 \n"
"fmla v30.4s, %11.4s, v24.4s \n"
"fmla v31.4s, %11.4s, v26.4s \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v28.4s, %12.4s, v21.4s \n"
"fmla v29.4s, %12.4s, v23.4s \n"
"fmla v30.4s, %12.4s, v25.4s \n"
"fmla v31.4s, %12.4s, v27.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n"// r20 r21 r22 r23
"fmla v28.4s, %13.4s, v22.4s \n"
"fmla v29.4s, %13.4s, v24.4s \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v30.4s, %13.4s, v26.4s \n"
"fmla v31.4s, %13.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v14.4h, v15.4h, v16.4h, v17.4h}, [%3], #32 \n"// r24 r25 r26 r27
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v18.4h}, [%3] \n"// r28
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v28.4s, %14.4s, v10.4s \n"
"fmla v29.4s, %14.4s, v12.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"fmla v30.4s, %14.4s, v14.4s \n"
"fmla v31.4s, %14.4s, v16.4s \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v28.4s, %15.4s, v11.4s \n"
"fmla v29.4s, %15.4s, v13.4s \n"
"fmla v30.4s, %15.4s, v15.4s \n"
"fmla v31.4s, %15.4s, v17.4s \n"
"fmla v28.4s, %16.4s, v12.4s \n"
"fmla v29.4s, %16.4s, v14.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"fmla v30.4s, %16.4s, v16.4s \n"
"fmla v31.4s, %16.4s, v18.4s \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v28.4h, v29.4h, v30.4h, v31.4h}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
#endif // __aarch64__
// 2 output pixels per iteration (aarch64 and armv7 variants)
for (; j+1 < outw; j+=2)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%1], #32 \n"// r00 r01 r02 r03
"mov v22.16b, %17.16b \n"// sum00
"mov v23.16b, %17.16b \n"// sum01
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmul v20.4s, %8.4s, v10.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmul v21.4s, %8.4s, v12.4s \n"
"prfm pldl1keep, [%1, #64] \n"
"ld1 {v14.4h}, [%1] \n"// r04
"fmla v22.4s, %9.4s, v11.4s \n"
"fmla v23.4s, %9.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"// r10 r11 r12 r13
"shll v14.4s, v14.4h, #16 \n"
"fmla v20.4s, %10.4s, v12.4s \n"
"fmla v21.4s, %10.4s, v14.4s \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"fmla v22.4s, %11.4s, v16.4s \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v23.4s, %11.4s, v18.4s \n"
"prfm pldl1keep, [%2, #64] \n"
"ld1 {v15.4h}, [%2] \n"// r14
"fmla v20.4s, %12.4s, v17.4s \n"
"fmla v21.4s, %12.4s, v19.4s \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v10.4h, v11.4h, v12.4h, v13.4h}, [%3], #32 \n"// r20 r21 r22 r23
"shll v15.4s, v15.4h, #16 \n"
"fmla v22.4s, %13.4s, v18.4s \n"
"fmla v23.4s, %13.4s, v15.4s \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v20.4s, %14.4s, v10.4s \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"fmla v21.4s, %14.4s, v12.4s \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v14.4h}, [%3] \n"// r24
"fmla v22.4s, %15.4s, v11.4s \n"
"fmla v23.4s, %15.4s, v13.4s \n"
"shll v14.4s, v14.4h, #16 \n"
"fmla v20.4s, %16.4s, v12.4s \n"
"fmla v21.4s, %16.4s, v14.4s \n"
"fadd v22.4s, v20.4s, v22.4s \n"
"fadd v23.4s, v21.4s, v23.4s \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v22.4h, v23.4h}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.u16 {d28-d31}, [%1 :64]! \n"// r00 r01 r02 r03
"vmov q10, %q17 \n"// sum00
"vmov q11, %q17 \n"// sum01
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q8, q12 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q11, %q8, q14 \n"
"vld1.u16 {d25}, [%1] \n"// r04
"vmla.f32 q10, %q9, q13 \n"
"vmla.f32 q11, %q9, q15 \n"
"vshll.u16 q12, d25, #16 \n"
"vmla.f32 q10, %q10, q14 \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2 :64]! \n"// r10 r11 r12 r13
"vmla.f32 q11, %q10, q12 \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q11, q12 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q11, %q11, q14 \n"
"vld1.u16 {d25}, [%2] \n"// r14
"vmla.f32 q10, %q12, q13 \n"
"vmla.f32 q11, %q12, q15 \n"
"vshll.u16 q12, d25, #16 \n"
"vmla.f32 q10, %q13, q14 \n"
"pld [%3, #256] \n"
"vld1.u16 {d28-d31}, [%3 :64]! \n"// r20 r21 r22 r23
"vmla.f32 q11, %q13, q12 \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vmla.f32 q10, %q14, q12 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q11, %q14, q14 \n"
"vld1.u16 {d25}, [%3] \n"// r24
"vmla.f32 q10, %q15, q13 \n"
"vmla.f32 q11, %q15, q15 \n"
"vshll.u16 q12, d25, #16 \n"
"vmla.f32 q10, %q16, q14 \n"
"vmla.f32 q11, %q16, q12 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vst1.u16 {d20-d21}, [%0 :64]! \n"
: "=r"(outptr0), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2) // %3
: "0"(outptr0),
"1"(r0),
"2"(r1),
"3"(r2),
"w"(_k00), // %8
"w"(_k01), // %9
"w"(_k02), // %10
"w"(_k10), // %11
"w"(_k11), // %12
"w"(_k12), // %13
"w"(_k20), // %14
"w"(_k21), // %15
"w"(_k22), // %16
"w"(_bias0) // %17
: "memory", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif
}
// scalar tail: 1 output pixel per iteration, via intrinsics
for (; j < outw; j++)
{
float32x4_t _sum0 = _bias0;
// widen the 3x3 input window from bf16 to f32 (stride-2 column step = 8 u16)
float32x4_t _r00 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0), 16));
float32x4_t _r01 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0+4), 16));
float32x4_t _r02 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r0+8), 16));
float32x4_t _r10 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1), 16));
float32x4_t _r11 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1+4), 16));
float32x4_t _r12 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r1+8), 16));
float32x4_t _r20 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2), 16));
float32x4_t _r21 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2+4), 16));
float32x4_t _r22 = vreinterpretq_f32_u32(vshll_n_u16(vld1_u16(r2+8), 16));
_sum0 = vmlaq_f32(_sum0, _k00, _r00);
_sum0 = vmlaq_f32(_sum0, _k01, _r01);
_sum0 = vmlaq_f32(_sum0, _k02, _r02);
_sum0 = vmlaq_f32(_sum0, _k10, _r10);
_sum0 = vmlaq_f32(_sum0, _k11, _r11);
_sum0 = vmlaq_f32(_sum0, _k12, _r12);
_sum0 = vmlaq_f32(_sum0, _k20, _r20);
_sum0 = vmlaq_f32(_sum0, _k21, _r21);
_sum0 = vmlaq_f32(_sum0, _k22, _r22);
// narrow f32 back to bf16 and store 4 lanes
vst1_u16(outptr0, vshrn_n_u32(vreinterpretq_u32_f32(_sum0), 16));
r0 += 2*4;
r1 += 2*4;
r2 += 2*4;
outptr0 += 4;
}
// advance input rows: skip the remainder of this row pair (stride 2)
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
/* Fully parenthesized min/max macros: the result operands must also be
 * wrapped, otherwise expansions like MAX(a, b ? c : d) parse incorrectly. */
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: y is normalized in place (its fields are modified) so that the
 * microsecond subtraction cannot underflow — the classic glibc-manual
 * recipe.  Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds when x has fewer usecs. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }

    /* Push any surplus microseconds in the difference back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* y is now normalized, so the usec component below is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* The difference is negative iff x's adjusted seconds are smaller. */
    return (x->tv_sec < y->tv_sec) ? 1 : 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 1024;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
irbuilder_for_unsigned_down.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@workshareloop_unsigned_down(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store i32 32000000, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = sub i32 %[[DOTCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 0)
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP6:.+]] = sub i32 %[[TMP5]], %[[TMP4]]
// CHECK-NEXT: %[[TMP7:.+]] = add i32 %[[TMP6]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_HEADER]]:
// CHECK-NEXT: %[[OMP_LOOP_IV:.+]] = phi i32 [ 0, %[[OMP_LOOP_PREHEADER]] ], [ %[[OMP_LOOP_NEXT:.+]], %[[OMP_LOOP_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_LOOP_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_COND]]:
// CHECK-NEXT: %[[OMP_LOOP_CMP:.+]] = icmp ult i32 %[[OMP_LOOP_IV]], %[[TMP7]]
// CHECK-NEXT: br i1 %[[OMP_LOOP_CMP]], label %[[OMP_LOOP_BODY:.+]], label %[[OMP_LOOP_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: %[[TMP8:.+]] = add i32 %[[OMP_LOOP_IV]], %[[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP8]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[CONV:.+]] = uitofp i32 %[[TMP9]] to float
// CHECK-NEXT: %[[TMP10:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = zext i32 %[[TMP11]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP10]], i64 %[[IDXPROM]]
// CHECK-NEXT: store float %[[CONV]], float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_INC]]:
// CHECK-NEXT: %[[OMP_LOOP_NEXT]] = add nuw i32 %[[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label %[[OMP_LOOP_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM2:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM2]])
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
extern "C" void workshareloop_unsigned_down(float *a) {
// Unsigned count-DOWN worksharing loop (step -7): exercises the IRBuilder
// trip-count computation for a downward unsigned loop, checked by the
// autogenerated CHECK lines above (__captured_stmt computes the count).
#pragma omp for
for (unsigned i = 32000000; i > 33; i -= 7) {
a[i] = i;
}
}
#endif // HEADER
//
//
//
//
//
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 33, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 -7, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp ugt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub nsw i32 0, %[[TMP8]]
// CHECK-NEXT: %[[SUB2:.+]] = sub i32 %[[SUB1]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB2]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB3:.+]] = sub nsw i32 0, %[[TMP9]]
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[SUB3]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 -7, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 45}
// CHECK: ![[META2:[0-9]+]] =
|
solver.c | #include <float.h>
#include <stdlib.h>
#include <stdio.h>
#include <stddef.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#include <omp.h>
#include <mpi.h>
#include "priority_queue.h"
#include "generics.h"
#include "comforts.h"
#include "list.h"
// MPI message tags used by the solver's point-to-point protocol.
#define SYN_TAG 11
#define SYN_ACK_TAG 22
#define DATA_TAG 33
#define INIT_LOAD_TAG 44
#define BOUND_TAG 55
#define TERM_INIT_TAG 66
#define TERM_ROUND_2 77
#define TERM_KILL_TAG 88
#define QUEUE_DATA_TAG 99
// Distributed-termination and load-balancing tuning knobs.
#define TERMINATION_CONDITION 4
#define NUM_THREADS 3
#define MIN_NULL_FLAG_LOAD_BAL 5
#define MAX_QLEN_LOAD_BAL 300
#define LOAD_BAL_THRESH 10000
// Per-thread busy flags; guarded by working_lock.
int working[NUM_THREADS];
int null_flag;
// Turnstile lock used to pause/resume the worker threads.
omp_lock_t work_lock_turnstile;
omp_lock_t working_lock;
// Work queue shared between the worker threads of this rank.
queue* shared_queue;
int err = 0;
int my_rank;
int world_size;
// Torus topology dimensions.
int X_LIM;
int Y_LIM;
int end_program = 0;
int in_loadbal = 0;
int i_stopped_chain;
int bothzero_loadbal = 0;
// Opaque problem-specific data, produced by populate_domain_data().
void* problem_data;
// Neighbour directions in the 2D torus; DUMMY is a sentinel.
enum CommNum {
CENTRE,
LEFT,
RIGHT,
UP,
DOWN,
DUMMY
};
// Payload broadcast when a rank improves the global bound; mapped to the
// MPI datatype bound_comm_t built in main().
typedef struct bound_comm {
float new_bound;
int start_rank;
} bound_comm;
// Absolute difference |a - b| of two ints.
int ABSVAL(int a, int b) {
    return (a > b) ? (a - b) : (b - a);
}
// Forward declarations for the solver's communication and search routines.
void send_bound();
void receive_and_forward_bound();
void queue_balancing(int neighbor_rank, int my_queue_len, int neighbour_queue_len, int recp);
void expand_partial_solution(queue* private_queue, void* domain_data);
MPI_Comm* create_topology();
void setupCommunicators();
void load_balancing();
void loadbal_recipient(enum CommNum direc, int recv_queue_len);
void loadbal_initiator(enum CommNum direc);
int initializeLoad(void* domain_data);
int calculate_next_rank();
int calculate_prev_rank();
void termination_init();
int termination_detection(int);
// (De)serialization of queue contents for transfer between ranks.
void pack_array(queue_head* qh, void* outbuff, int buff_size, int* pos, void* problem_data);
void unpack_array(queue* q, void* outbuff, int buff_size, int len, int* pos, void* problem_data);
// MPI datatype describing struct bound_comm; committed in main().
MPI_Datatype bound_comm_t;
// Best-known solution state; guarded by best_solution_lock.
omp_lock_t best_solution_lock;
int best_score_changed = 0;
float best_score = FLT_MAX;
list* best_solution;
// Torus communicator and cached neighbour ranks (indexed by enum CommNum).
MPI_Comm torus;
int neighbors_rank[5];
int torus_neighbors_rank[5];
int stop_flag = 0;
/* Program entry point.
 * Per rank, NUM_THREADS OpenMP threads are spawned: threads 0..NUM_THREADS-2
 * are workers that expand partial solutions from the shared priority queue;
 * the last thread is a dedicated MPI communication thread that propagates
 * bounds, drives neighbour load balancing, and runs termination detection. */
int main(int argc, char** argv) {
  int thread_support_required = MPI_THREAD_MULTIPLE;
  int thread_support_supplied;
  MPI_Init_thread(&argc, &argv, thread_support_required, &thread_support_supplied);
  // printf("THREAD SUPPORT: %d\n", thread_support_supplied);
  // NOTE(review): thread_support_supplied is never checked; if the MPI library
  // downgrades the level, multithreaded MPI use below is unsafe -- TODO confirm.
  setupCommunicators();
  MPI_Comm_rank(torus, &my_rank);
  srand(time(NULL));
  /* Build the MPI datatype for struct bound_comm {float new_bound; int start_rank;}. */
  const int dt_num = 2;
  MPI_Datatype dt_type[2] = {MPI_FLOAT, MPI_INT};
  int dt_blocklen[2] = {1, 1};
  MPI_Aint offset[2];
  offset[0] = offsetof(bound_comm, new_bound);
  offset[1] = offsetof(bound_comm, start_rank);
  MPI_Type_create_struct(dt_num, dt_blocklen, offset, dt_type, &bound_comm_t);
  MPI_Type_commit(&bound_comm_t);
  omp_init_lock(&best_solution_lock);
  omp_init_lock(&work_lock_turnstile);
  omp_init_lock(&working_lock);
  best_solution = create_list();
  problem_data = populate_domain_data(argc, argv);
  shared_queue = create_queue();
  initializeLoad(problem_data);   // rank 0 seeds the tree and scatters one node per rank
  null_flag = 0;
  #pragma omp parallel num_threads(NUM_THREADS)
  {
    int thread_rank = omp_get_thread_num();
    if (thread_rank != NUM_THREADS-1) {
      /* Worker thread: expand the best node repeatedly.  The turnstile lock
       * lets the communication thread pause all workers during an exchange. */
      queue* private_queue = create_queue();
      working[thread_rank] = 0;
      while (!end_program) {
        omp_set_lock(&work_lock_turnstile);   // blocks here while a balance round holds the turnstile
        omp_unset_lock(&work_lock_turnstile);
        omp_set_lock(&working_lock);
        working[thread_rank] |= 1;            // mark busy so the comm thread can wait for quiescence
        omp_unset_lock(&working_lock);
        expand_partial_solution(private_queue, problem_data);
        omp_set_lock(&working_lock);
        working[thread_rank] &= 0;            // mark idle
        omp_unset_lock(&working_lock);
      }
    } else {
      /* Communication thread: a non-blocking progress loop. */
      int bound_send_flag, flag, left_ack_flag, up_ack_flag ;
      int syn_receive, syn_ack_receive, syn_share=1;
      MPI_Request syn_request_down, syn_request_right, syn_ack_up, syn_ack_left;
      MPI_Request syn_receive_left, syn_receive_up, syn_ack_right, syn_ack_down, syn_qrecv_left, syn_qrecv_up;
      MPI_Request termination_request;
      int recv_queue_len;
      int termination_token;
      int syn_ack_send = 1;
      int load_bal_chosen = 1;
      /* Pre-post SYN receives: this rank acts as load-balance recipient on
       * its LEFT and UP edges, and as initiator on RIGHT and DOWN. */
      if (neighbors_rank[LEFT] != -1) {
        MPI_Irecv(&syn_receive, 1, MPI_INT, neighbors_rank[LEFT], SYN_TAG, torus,
                  &syn_receive_left);
        left_ack_flag = 0;
      }
      if (neighbors_rank[UP] != -1) {
        MPI_Irecv(&syn_receive, 1, MPI_INT, neighbors_rank[UP], SYN_TAG, torus,
                  &syn_receive_up);
        up_ack_flag = 0;
      }
      if (my_rank != 0) {
        /* Termination tokens travel a ring: prev rank -> this rank -> next rank. */
        MPI_Irecv(&termination_token, 1, MPI_INT, calculate_prev_rank(), TERM_INIT_TAG, torus,
                  &termination_request);
      }
      while (!end_program) {
        /* 1) If a worker improved the bound, broadcast it; then relay bounds. */
        bound_send_flag = 0;
        omp_set_lock(&best_solution_lock);
        if (best_score_changed > 0) {
          bound_send_flag = 1;
        }
        omp_unset_lock(&best_solution_lock);
        if (bound_send_flag)
          send_bound();
        receive_and_forward_bound();
        /* 2) Recipient state machine, LEFT edge.  Phase A: SYN arrived ->
         * send ACK and post a receive for the initiator's queue length.
         * Phase B: length arrived -> run the exchange, re-arm the SYN recv. */
        if (neighbors_rank[LEFT] != -1) {
          if(left_ack_flag){
            MPI_Test(&syn_qrecv_left, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              loadbal_recipient(LEFT, recv_queue_len);
              MPI_Irecv(&syn_receive, 1, MPI_INT, neighbors_rank[LEFT], SYN_TAG,
                        torus, &syn_receive_left);
              left_ack_flag = 0;
            }
          } else {
            MPI_Test(&syn_receive_left, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              MPI_Isend(&syn_ack_send, 1, MPI_INT, neighbors_rank[LEFT],
                        SYN_ACK_TAG, torus, &syn_ack_left);
              MPI_Irecv(&recv_queue_len, 1, MPI_INT, neighbors_rank[LEFT], DATA_TAG,
                        torus, &syn_qrecv_left);
              left_ack_flag = 1;
            }
          }
        }
        /* Same recipient state machine for the UP edge. */
        if (neighbors_rank[UP] != -1) {
          if(up_ack_flag) {
            MPI_Test(&syn_qrecv_up, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              loadbal_recipient(UP, recv_queue_len);
              MPI_Irecv(&syn_receive, 1, MPI_INT, neighbors_rank[UP], SYN_TAG,
                        torus, &syn_receive_up);
              up_ack_flag = 0;
            }
          } else {
            MPI_Test(&syn_receive_up, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              MPI_Isend(&syn_ack_send, 1, MPI_INT, neighbors_rank[UP],
                        SYN_ACK_TAG, torus, &syn_ack_up);
              MPI_Irecv(&recv_queue_len, 1, MPI_INT, neighbors_rank[UP], DATA_TAG,
                        torus, &syn_qrecv_up);
              up_ack_flag = 1;
            }
          }
        }
        /* 3) Initiator side: once the chosen neighbour ACKs our SYN, run the
         * exchange and leave the in-loadbal state. */
        if (in_loadbal) {
          if (neighbors_rank[RIGHT] != -1 && load_bal_chosen==0) {
            MPI_Test(&syn_ack_right, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              loadbal_initiator(RIGHT);
              in_loadbal--;
            }
          }
          if (neighbors_rank[DOWN] != -1 && load_bal_chosen==1) {
            MPI_Test(&syn_ack_down, &flag, MPI_STATUS_IGNORE);
            if (flag) {
              loadbal_initiator(DOWN);
              in_loadbal--;
            }
          }
        }
        /* 4) Termination: rank 0 starts a probe after enough "both queues
         * empty" balance rounds; other ranks react to the ring token. */
        if (my_rank==0 && bothzero_loadbal >= TERMINATION_CONDITION && !in_loadbal) {
          termination_init();
        }
        if (my_rank != 0) {
          MPI_Test(&termination_request, &flag, MPI_STATUS_IGNORE);
          if (flag) {
            if (termination_detection(termination_token)) {
              //TODO: cleanup and exit.
              end_program = 1;
              break;
            } else {
              MPI_Irecv(&termination_token, 1, MPI_INT, calculate_prev_rank(), TERM_INIT_TAG, torus,
                        &termination_request);
            }
          }
        }
        /* 5) Start a new balance round -- alternating RIGHT/DOWN -- when the
         * workers are starved (null_flag) or the local queue is oversized. */
        if (!in_loadbal && null_flag >= MIN_NULL_FLAG_LOAD_BAL) {
          //do load_balancing
          load_bal_chosen += 1;
          load_bal_chosen = load_bal_chosen%2;
          if (neighbors_rank[RIGHT] != -1 && load_bal_chosen==0) {
            in_loadbal++;
            MPI_Isend(&syn_share, 1, MPI_INT, neighbors_rank[RIGHT], SYN_TAG,
                      torus, &syn_request_right);
            MPI_Irecv(&syn_ack_receive, 1, MPI_INT,
                      neighbors_rank[RIGHT],SYN_ACK_TAG,torus, &syn_ack_right);
          }
          if (neighbors_rank[DOWN] != -1 && load_bal_chosen==1) {
            in_loadbal++;
            MPI_Isend(&syn_share, 1, MPI_INT, neighbors_rank[DOWN], SYN_TAG,
                      torus, &syn_request_down);
            MPI_Irecv(&syn_ack_receive, 1, MPI_INT,
                      neighbors_rank[DOWN],SYN_ACK_TAG,torus, &syn_ack_down);
          }
          /* Atomically clear the starvation counter (x ^= x is 0). */
          #pragma omp atomic
          null_flag ^= null_flag;
        } else if (!in_loadbal && pq_length(shared_queue) > MAX_QLEN_LOAD_BAL) {
          //do load_balancing
          load_bal_chosen += 1;
          load_bal_chosen = load_bal_chosen%2;
          if (neighbors_rank[RIGHT] != -1 && load_bal_chosen==0) {
            in_loadbal++;
            MPI_Isend(&syn_share, 1, MPI_INT, neighbors_rank[RIGHT], SYN_TAG,
                      torus, &syn_request_right);
            MPI_Irecv(&syn_ack_receive, 1, MPI_INT,
                      neighbors_rank[RIGHT], SYN_ACK_TAG,torus, &syn_ack_right);
          }
          if (neighbors_rank[DOWN] != -1 && load_bal_chosen==1) {
            in_loadbal++;
            MPI_Isend(&syn_share, 1, MPI_INT, neighbors_rank[DOWN], SYN_TAG,
                      torus, &syn_request_down);
            MPI_Irecv(&syn_ack_receive, 1, MPI_INT,
                      neighbors_rank[DOWN], SYN_ACK_TAG,torus, &syn_ack_down);
          }
          #pragma omp atomic
          null_flag ^= null_flag;
        }
      }
    }
  }
  /* Epilogue: find the global minimum score; every rank holding it prints
   * its solutions.  Non-root ranks pass NULL as the gather receive buffer. */
  float *scores_from_all;
  float min_score;
  if (my_rank == 0) {
    scores_from_all = (float*) calloc(world_size, sizeof(float));
    MPI_Gather(&best_score, 1, MPI_FLOAT, scores_from_all, 1, MPI_FLOAT, 0, torus);
    min_score = best_score;
    for (int i = 0; i < world_size; i++) {
      if (scores_from_all[i] < min_score) {
        min_score = scores_from_all[i];
      }
    }
    MPI_Bcast(&min_score, 1, MPI_FLOAT, 0, torus);
  } else {
    MPI_Gather(&best_score, 1, MPI_FLOAT, NULL, 1, MPI_FLOAT, 0, torus);
    MPI_Bcast(&min_score, 1, MPI_FLOAT, 0, torus);
  }
  if (best_score == min_score) {
    list_print(best_solution, best_score);
  }
  MPI_Finalize();
  return 0;
}
/* Recipient half of a load-balance exchange with neighbour `direc`.
 * Grabs the turnstile so no worker can start a new expansion, spins until
 * every worker is idle (so pq_length is a stable snapshot), then runs the
 * symmetric exchange.  recv_queue_len is the initiator's queue length,
 * already received by the caller. */
void loadbal_recipient(enum CommNum direc, int recv_queue_len) {
  int send_queue_len;
  omp_set_lock(&work_lock_turnstile);   // park all workers at the turnstile
  int all_done = 0;
  /* Poll until every working[i] flag is clear. */
  while (!all_done) {
    omp_set_lock(&working_lock);
    for (int i = 0; i < NUM_THREADS-1; i++) {
      all_done |= working[i];
      if (all_done)
        break;
    }
    omp_unset_lock(&working_lock);
    if (all_done) {
      all_done = 0;   // a worker was still busy: reset and poll again
    } else {
      break;          // all idle -- safe to inspect the queue
    }
  }
  send_queue_len = pq_length(shared_queue);
  queue_balancing(neighbors_rank[direc], send_queue_len, recv_queue_len, 1);  // recp=1
  omp_unset_lock(&work_lock_turnstile);
}
/* Initiator half of a load-balance exchange with neighbour `direc`.
 * Quiesces the local workers (same turnstile protocol as the recipient),
 * sends its queue length, then runs the exchange with recp=0: the decision
 * of who ships work is made by the recipient and received inside
 * queue_balancing(). */
void loadbal_initiator(enum CommNum direc) {
  int send_queue_len;
  omp_set_lock(&work_lock_turnstile);   // park all workers at the turnstile
  int all_done = 0;
  /* Poll until every working[i] flag is clear. */
  while (!all_done) {
    omp_set_lock(&working_lock);
    for (int i = 0; i < NUM_THREADS-1; i++) {
      all_done |= working[i];
      if (all_done)
        break;
    }
    omp_unset_lock(&working_lock);
    if (all_done) {
      all_done = 0;   // a worker was still busy: reset and poll again
    } else {
      break;          // all idle -- safe to inspect the queue
    }
  }
  send_queue_len = pq_length(shared_queue);
  MPI_Send(&send_queue_len, 1, MPI_INT, neighbors_rank[direc], DATA_TAG, torus);
  queue_balancing(neighbors_rank[direc], send_queue_len, 0, 0);  // recp=0
  omp_unset_lock(&work_lock_turnstile);
}
void setupCommunicators() {
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
X_LIM = 1;
Y_LIM = world_size;
for (int i = ((int) sqrt(world_size)); i > 1; i--) {
if (world_size % i == 0) {
X_LIM = i;
Y_LIM = world_size/X_LIM;
break;
}
}
int rank;
torus = *(create_topology());
MPI_Comm_rank(torus, &rank);
int coords[2];
REPORT_ERROR(MPI_Cart_coords(torus, rank, 2, coords), "MPI_Cart_coords: ERROR\n", );
int coords_ptr[5][2];
coords_ptr[CENTRE][0] = coords[0];
coords_ptr[CENTRE][1] = coords[1];
coords_ptr[LEFT][0] = coords[0];
coords_ptr[LEFT][1] = (coords[1] - 1);
coords_ptr[RIGHT][0] = coords[0];
coords_ptr[RIGHT][1] = (coords[1] + 1);
coords_ptr[UP][0] = (coords[0] - 1);
coords_ptr[UP][1] = coords[1];
coords_ptr[DOWN][0] = (coords[0] + 1);
coords_ptr[DOWN][1] = coords[1];
for (int i = 0; i < 5; i++) {
if ((coords_ptr[i][0] >= 0 && coords_ptr[i][0] < X_LIM) &&
(coords_ptr[i][1] >= 0 && coords_ptr[i][1] < Y_LIM)) {
MPI_Cart_rank(torus, coords_ptr[i], &neighbors_rank[i]);
} else {
neighbors_rank[i] = -1;
}
MPI_Cart_rank(torus, coords_ptr[i], &torus_neighbors_rank[i]);
}
}
/* Build a 2-D periodic Cartesian topology over MPI_COMM_WORLD with
 * dimensions X_LIM x Y_LIM (both globals must already be set) and rank
 * reordering enabled.
 * Returns a heap-allocated MPI_Comm container: ownership transfers to the
 * caller, who should free() it after copying out the handle.
 * NOTE(review): the malloc result is not checked before use. */
MPI_Comm* create_topology() {
  MPI_Comm *mesh = (MPI_Comm *) malloc(sizeof(MPI_Comm));
  int ndims = 2;
  int dim_sz[2];
  int periodic[2];
  dim_sz[0] = X_LIM;
  dim_sz[1] = Y_LIM;
  periodic[0] = 1;    // wrap in both dimensions -> true torus
  periodic[1] = 1;
  int reorder = 1;    // let MPI reorder ranks for locality
  REPORT_ERROR(MPI_Cart_create(MPI_COMM_WORLD, ndims, dim_sz, periodic, reorder, mesh),
               "MPI_Cart_create: ERROR\n",
               NULL);
  return mesh;
}
/* One branch-and-bound expansion step, run by a worker thread.
 * Pops the lowest-score partial solution from the shared queue and hands it
 * to the domain callback construct_candidates(), which fills private_queue
 * with children.  A nonzero return from construct_candidates() appears to
 * mean "this node is a complete solution" -- TODO confirm against the domain
 * implementation -- in which case the global best is updated under the lock;
 * otherwise the children are pruned against the current bound and merged
 * back into the shared queue.
 * When the shared queue is empty, null_flag is bumped; the communication
 * thread uses it to trigger load balancing. */
void expand_partial_solution(queue* private_queue, void* domain_data) {
  float score;
  solution_vector partial_solution = pq_min_extract(shared_queue, &score);
  if (partial_solution != NULL) {
    if (construct_candidates(partial_solution, score, private_queue, domain_data)) {
      int flag = 0;
      omp_set_lock(&best_solution_lock);
      if (score < best_score) {
        /* Strictly better: replace the solution set and flag the improvement
         * so the comm thread broadcasts the new bound. */
        best_score = score;
        list_clear(best_solution);
        list_insert(best_solution, partial_solution);
        best_score_changed++;
        flag = 1;
      } else if (score == best_score) {
        list_insert(best_solution, partial_solution);  // tie: keep both
      }
      omp_unset_lock(&best_solution_lock);
      if (flag) {
        pq_prune(shared_queue, score);   // drop nodes that can no longer win
      }
    } else {
      pq_prune(private_queue, best_score);
      pq_merge(shared_queue, private_queue);
    }
  } else {
    /* Starved: count it so the comm thread can ask neighbours for work. */
    #pragma omp atomic
    null_flag += 1;
  }
}
/* Symmetric body of a load-balance exchange with rank n_rank.
 * recp==1: this side is the recipient and decides the direction of transfer
 *   (ros_flag==0 -> we send work, ros_flag==1 -> we receive work, -1 -> the
 *   imbalance is below LOAD_BAL_THRESH and nothing moves); the decision is
 *   sent to the initiator.
 * recp==0: this side is the initiator and receives the decision.
 * On the "-1" outcome rank 0 counts the no-op round toward termination. */
void queue_balancing(int n_rank, int my_queue_len, int neighbour_queue_len, int recp) {
  int pos = 0;
  int ros_flag;   // "receive or send": 0 = send work, 1 = receive work, -1 = no-op
  if(recp){
    int flag_for_comm;   // the peer's ros_flag (mirror of ours)
    if ( ABSVAL(my_queue_len, neighbour_queue_len) > LOAD_BAL_THRESH ) {
      if (my_queue_len > neighbour_queue_len) {
        flag_for_comm = 1;   // peer receives...
        ros_flag = 0;        // ...we send
      } else {
        flag_for_comm = 0;
        ros_flag = 1;
      }
    } else {
      flag_for_comm = -1;
      ros_flag = -1;
    }
    MPI_Send(&flag_for_comm, 1, MPI_INT, n_rank, DATA_TAG, torus);
  } else {
    MPI_Recv(&ros_flag, 1, MPI_INT, n_rank, DATA_TAG, torus,
             MPI_STATUS_IGNORE);
  }
  if (ros_flag == -1) {
    if (my_rank == 0)
      bothzero_loadbal++;   // termination heuristic: both queues (near) empty
    return;
  }
  void* buff;
  if (!ros_flag) {
    /* Sender: extract up to LOAD_BAL_THRESH/2 best nodes and ship them packed. */
    pos = 0;
    int len;
    queue_head* send_data = pq_extract_best(shared_queue, (LOAD_BAL_THRESH/2), &len);
    if(len == 0 || send_data == NULL){
      /* NOTE(review): a dummy int is packed here, but the MPI_Send below uses
       * count len*solution_vector_size == 0, so a zero-byte message is sent
       * and the packed -1 never travels; the receiver only works because it
       * special-cases recv_sz < solution_vector_size.  Fragile -- verify. */
      int dum = -1;
      buff = malloc(sizeof(int));
      int sz = sizeof(int);
      MPI_Pack(&dum, 1, MPI_INT, buff, sz, &pos, torus);
    } else {
      buff = (void *) malloc(len*solution_vector_size);
      pack_array(send_data, buff, (LOAD_BAL_THRESH/2)*solution_vector_size, &pos, problem_data);
      /* NOTE(review): the extracted subtree nodes are packed but apparently
       * never freed -- possible leak; confirm pq_extract_best's ownership. */
    }
    MPI_Send(buff, (len*solution_vector_size), MPI_PACKED, n_rank, QUEUE_DATA_TAG, torus);
  } else {
    /* Receiver: probe for the packed size, unpack, prune against the current
     * bound, and merge into the shared queue. */
    pos = 0;
    MPI_Status probe_stat;
    int recv_sz;
    MPI_Probe(n_rank, QUEUE_DATA_TAG,torus, &probe_stat);
    MPI_Get_count(&probe_stat, MPI_BYTE, &recv_sz);
    buff = malloc(recv_sz);
    MPI_Recv(buff, recv_sz, MPI_PACKED, n_rank, QUEUE_DATA_TAG, torus, MPI_STATUS_IGNORE);
    if( recv_sz < solution_vector_size ) {
      /* Degenerate message: the peer had nothing to send. */
      void *dummy_buff = malloc(recv_sz);
      MPI_Unpack(buff, recv_sz, &pos, dummy_buff, recv_sz, MPI_BYTE, torus);
      /* NOTE(review): dummy_buff is never freed -- small leak per round. */
    } else {
      queue* new_queue = create_queue();
      assert( recv_sz%solution_vector_size == 0 );
      int unpack_len = recv_sz/solution_vector_size;
      unpack_array(new_queue, buff, recv_sz, unpack_len, &pos, problem_data);
      float bscore;
      omp_set_lock(&best_solution_lock);
      bscore = best_score;
      omp_unset_lock(&best_solution_lock);
      pq_prune(new_queue, bscore);
      pq_merge(shared_queue, new_queue);
      destroy_queue(new_queue);
    }
  }
  free(buff);
}
/* Push the current best bound to all four torus neighbours and clear the
 * "changed" counter.  Uses the wrap-around neighbour table, so every rank
 * always has four targets.
 * NOTE(review): all four MPI_Isend calls reuse the single local `request`
 * handle without ever completing or freeing it (request leak), and
 * `recv_bound` is a stack variable that goes out of scope before the sends
 * are known to complete -- undefined behaviour per the MPI standard if any
 * send is still in flight.  Needs MPI_Waitall on a request array, or a
 * buffer that outlives the call. */
void send_bound() {
  MPI_Request request;
  bound_comm recv_bound;
  recv_bound.start_rank = my_rank;
  omp_set_lock(&best_solution_lock);
  recv_bound.new_bound = best_score;
  best_score_changed = 0;   // consume the workers' improvement signal
  omp_unset_lock(&best_solution_lock);
  REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[UP],
                         BOUND_TAG, torus, &request), "MPI_Isend : ERROR\n", );
  REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[DOWN],
                         BOUND_TAG, torus, &request), "MPI_Isend : ERROR\n", );
  REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[LEFT],
                         BOUND_TAG, torus, &request), "MPI_Isend : ERROR\n", );
  REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[RIGHT],
                         BOUND_TAG, torus, &request), "MPI_Isend : ERROR\n", );
}
void receive_and_forward_bound() {
int recv_flag = 0;
int flag = 0;
MPI_Status status;
MPI_Request request, request2;
bound_comm recv_bound;
REPORT_ERROR(MPI_Iprobe(torus_neighbors_rank[LEFT], BOUND_TAG, torus, &recv_flag,
&status), "MPI_Iprobe : ERROR\n", );
if (recv_flag) {
REPORT_ERROR(MPI_Recv(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[LEFT],
BOUND_TAG, torus, MPI_STATUS_IGNORE),
"MPI_Recv : ERROR\n", );
flag = 0;
omp_set_lock(&best_solution_lock);
if (recv_bound.new_bound < best_score) {
best_score = recv_bound.new_bound;
list_clear(best_solution);
flag = 1;
}
omp_unset_lock(&best_solution_lock);
if (flag == 1) {
pq_prune(shared_queue, best_score);
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[RIGHT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
}
}
recv_flag = 0;
REPORT_ERROR(MPI_Iprobe(torus_neighbors_rank[RIGHT], BOUND_TAG, torus, &recv_flag,
&status), "MPI_Iprobe : ERROR\n", );
if (recv_flag) {
REPORT_ERROR(MPI_Recv(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[RIGHT],
BOUND_TAG, torus, MPI_STATUS_IGNORE),
"MPI_Recv : ERROR\n", );
flag = 0;
omp_set_lock(&best_solution_lock);
if (recv_bound.new_bound < best_score) {
best_score = recv_bound.new_bound;
list_clear(best_solution);
flag = 1;
}
omp_unset_lock(&best_solution_lock);
if (flag == 1) {
pq_prune(shared_queue, best_score);
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[LEFT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
}
}
recv_flag = 0;
REPORT_ERROR(MPI_Iprobe(torus_neighbors_rank[DOWN], BOUND_TAG, torus, &recv_flag,
&status), "MPI_Iprobe : ERROR\n", );
if (recv_flag) {
REPORT_ERROR(MPI_Recv(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[DOWN],
BOUND_TAG, torus, MPI_STATUS_IGNORE),
"MPI_Recv : ERROR\n", );
flag = 0;
omp_set_lock(&best_solution_lock);
if (recv_bound.new_bound < best_score) {
best_score = recv_bound.new_bound;
list_clear(best_solution);
flag = 1;
}
omp_unset_lock(&best_solution_lock);
if (flag == 1) {
pq_prune(shared_queue, best_score);
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[UP],
BOUND_TAG, torus, &request2),
"MPI_Isend : ERROR\n", );
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[LEFT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[RIGHT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
}
}
recv_flag = 0;
REPORT_ERROR(MPI_Iprobe(torus_neighbors_rank[UP], BOUND_TAG, torus, &recv_flag,
&status), "MPI_Iprobe : ERROR\n", );
if (recv_flag) {
REPORT_ERROR(MPI_Recv(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[UP],
BOUND_TAG, torus, MPI_STATUS_IGNORE),
"MPI_Recv : ERROR\n", );
flag = 0;
omp_set_lock(&best_solution_lock);
if (recv_bound.new_bound < best_score) {
best_score = recv_bound.new_bound;
list_clear(best_solution);
flag = 1;
}
omp_unset_lock(&best_solution_lock);
if (flag == 1) {
pq_prune(shared_queue, best_score);
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[DOWN],
BOUND_TAG, torus, &request2),
"MPI_Isend : ERROR\n", );
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[LEFT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
REPORT_ERROR(MPI_Isend(&recv_bound, 1, bound_comm_t, torus_neighbors_rank[RIGHT],
BOUND_TAG, torus, &request),
"MPI_Isend : ERROR\n", );
}
}
}
// not threadsafe. use with caution.
/* Serialize the priority-queue subtree rooted at qh into outbuff in
 * pre-order (node, then left subtree, then right subtree), advancing *pos
 * past each packed solution. */
void pack_array(queue_head* qh, void* outbuff, int buff_size, int* pos, void* problem_data) {
  if (qh == NULL)
    return;
  pack_solution(outbuff, buff_size, pos, qh->partial_solution, qh->priority, torus,
                problem_data);
  pack_array(qh->left_subtree, outbuff, buff_size, pos, problem_data);
  pack_array(qh->right_subtree, outbuff, buff_size, pos, problem_data);
}
// not threadsafe. use with caution.
/* Deserialize `len` packed solutions from outbuff (starting at *pos) and
 * insert each into queue q with its unpacked priority. */
void unpack_array(queue* q, void* outbuff, int buff_size, int len, int* pos,
                  void* problem_data) {
  float priority;
  solution_vector sol;
  int remaining = len;
  while (remaining-- > 0) {
    sol = unpack_solution(outbuff, buff_size, torus, pos, &priority, problem_data);
    pq_insert_nc(q, priority, sol);
  }
}
/* Seed the search and distribute initial work.
 * Rank 0 inserts the root node, expands in parallel until the shared queue
 * holds more than X_LIM*Y_LIM nodes (one per rank), then ships one packed
 * node to each other rank.  If the queue empties first, the whole problem
 * was solved during seeding and every rank is told to stop.
 * Returns 1 when the program should end immediately, 0 otherwise. */
int initializeLoad(void* domain_data) {
  if (my_rank == 0) {
    pq_insert_nc(shared_queue, get_root_partial_soln_score(domain_data),
                 get_root_partial_solution(domain_data));
    #pragma omp parallel num_threads(NUM_THREADS)
    {
      queue* priv_queue = create_queue();
      int l;
      while (1) {
        l = pq_length(shared_queue);
        /* Barriers keep all threads seeing the same queue-length snapshot
         * before deciding to stop or expand. */
        #pragma omp barrier
        if (l > X_LIM * Y_LIM) {
          break;   // enough nodes to give one to every rank
        }
        if (l == 0) {
          end_program = 1;   // solved (or infeasible) during seeding
          break;
        }
        expand_partial_solution(priv_queue, domain_data);
        #pragma omp barrier
      }
      destroy_queue(priv_queue);
    }
    if (end_program) {
      for (int i = 1; i < world_size; i++) {
        MPI_Send(&end_program, 1, MPI_INT, i, INIT_LOAD_TAG, torus);
      }
      // cleanup and shutdown
      return 1;
    } else {
      queue_head* qh;
      int pos = 0;
      void* outbuff = malloc(solution_vector_size);
      for (int i = 1; i < world_size; i++) {
        pos = 0;
        /* pq_extract(shared_queue, 1): presumably detaches exactly one node,
         * so the single-element buffer suffices -- TODO confirm. */
        qh = pq_extract(shared_queue, 1);
        pack_array(qh, outbuff, solution_vector_size, &pos, domain_data);
        MPI_Send(&end_program, 1, MPI_INT, i, INIT_LOAD_TAG, torus);   // "keep going" flag
        MPI_Send(outbuff, solution_vector_size, MPI_PACKED, i, INIT_LOAD_TAG, torus);
      }
      free(outbuff);
      return 0;
    }
  } else {
    /* Non-root: learn whether to stop; otherwise receive one seed node. */
    MPI_Recv(&end_program, 1, MPI_INT, 0, INIT_LOAD_TAG, torus, MPI_STATUS_IGNORE);
    if (end_program) {
      return 1;
    } else {
      int pos = 0;
      void* outbuff = malloc(solution_vector_size);
      MPI_Recv(outbuff, solution_vector_size, MPI_PACKED, 0, INIT_LOAD_TAG, torus,
               MPI_STATUS_IGNORE);
      unpack_array(shared_queue, outbuff, solution_vector_size, 1, &pos,
                   domain_data);
      free(outbuff);
      return 0;
    }
  }
}
/* Successor of this rank on the termination ring (wraps to 0). */
int calculate_next_rank() {
  return (my_rank + 1) % world_size;
}
/* Predecessor of this rank on the termination ring (rank 0 wraps to the
 * last rank). */
int calculate_prev_rank() {
  return (my_rank == 0) ? (world_size - 1) : (my_rank - 1);
}
/* Rank 0's side of the two-round ring termination protocol.
 * Round 1: pass a zero token around the ring; any rank with work replaces
 *   it with a nonzero value, so a zero coming back means every rank had an
 *   empty queue at the moment it forwarded the token.
 * Round 2: quiesce local workers and circulate the running sum of queue
 *   lengths; if the sum is still zero when it returns, send the kill
 *   confirmation (0) around the ring and stop; otherwise send a decline (1).
 * Only runs when this rank's own queue is empty. */
void termination_init() {
  if(pq_length(shared_queue) != 0)
    return;
  int next_rank = calculate_next_rank();
  int prev_rank = calculate_prev_rank();
  int prev_q_len;
  int pos_num = 93;          // arbitrary nonzero "abort round 2" marker
  int num_zero = 0;
  int kill_confirmation = 0; // token value meaning "terminate"
  int kill_decline = 1;      // token value meaning "keep running"
  int prev_num;
  int dummy;
  int all_done;
  /* ROUND 1 */
  MPI_Send(&num_zero, 1, MPI_INT, next_rank, TERM_INIT_TAG, torus);
  MPI_Recv(&prev_num, 1, MPI_INT, prev_rank, TERM_INIT_TAG, torus, MPI_STATUS_IGNORE);
  if(prev_num != 0) {
    /* Some rank still had work: abort by pushing a nonzero round-2 token. */
    MPI_Send(&pos_num, 1, MPI_INT, next_rank, TERM_ROUND_2, torus);
  } else {
    /* ROUND 2 */
    omp_set_lock(&work_lock_turnstile);   // stop workers from taking new work
    all_done = 0;
    /* Poll until every worker thread is idle (same pattern as loadbal_*). */
    while (!all_done) {
      omp_set_lock(&working_lock);
      for (int i = 0; i < NUM_THREADS-1; i++) {
        all_done |= working[i];
        if (all_done)
          break;
      }
      omp_unset_lock(&working_lock);
      if (all_done) {
        all_done = 0;
      } else {
        break;
      }
    }
    num_zero = pq_length(shared_queue);   // re-check after quiescing
    omp_unset_lock(&work_lock_turnstile);
    MPI_Send(&num_zero, 1, MPI_INT, next_rank, TERM_ROUND_2, torus);
    MPI_Recv(&prev_q_len, 1, MPI_INT, prev_rank, TERM_ROUND_2, torus, MPI_STATUS_IGNORE);
    if(prev_q_len != 0){
      /* Work reappeared somewhere: circulate a decline and stay alive. */
      MPI_Send(&kill_decline, 1, MPI_INT, next_rank, TERM_KILL_TAG, torus);
      MPI_Recv(&dummy, 1, MPI_INT, prev_rank, TERM_KILL_TAG, torus, MPI_STATUS_IGNORE);
    } else {
      /* Everyone idle with empty queues: confirm the kill and stop. */
      MPI_Send(&kill_confirmation, 1, MPI_INT, next_rank, TERM_KILL_TAG, torus);
      MPI_Recv(&dummy, 1, MPI_INT, prev_rank, TERM_KILL_TAG, torus, MPI_STATUS_IGNORE);
      end_program = 1;
    }
  }
}
/* Non-root side of the ring termination protocol, invoked when the round-1
 * token `term_init_msg` arrives from the previous rank.
 * Returns 1 when this rank should terminate, 0 otherwise.
 * Behaviour:
 *  - token already nonzero: just propagate it (round already failed);
 *  - local queue non-empty (checked both before and after quiescing the
 *    workers): mark the chain stopped, propagate a nonzero token, and wait
 *    for the round-2 token to drain the protocol;
 *  - otherwise: forward the zero token, add our queue length into the
 *    round-2 running sum, and obey the kill/decline token that follows. */
int termination_detection(int term_init_msg) {
  i_stopped_chain = 0;
  int num_zero = 0;
  int non_zero = -1;
  int all_done;
  int next_rank = calculate_next_rank();
  int prev_rank = calculate_prev_rank();
  int dummy;
  int kill_msg;
  int q_len, add_len, prev_q_len;
  if(term_init_msg != 0) {
    /* Round 1 already failed upstream; keep the failure circulating. */
    MPI_Send(&non_zero, 1, MPI_INT, next_rank, TERM_INIT_TAG, torus);
    return 0;
  } else {
    q_len = pq_length(shared_queue);
    if(q_len != 0) {
      /* We still have work: break the zero chain, then absorb the round-2
       * token so the ring's message pattern stays consistent. */
      i_stopped_chain = 1;
      MPI_Send(&non_zero, 1, MPI_INT, next_rank, TERM_INIT_TAG, torus);
      MPI_Recv(&dummy, 1, MPI_INT, prev_rank, TERM_ROUND_2, torus, MPI_STATUS_IGNORE);
      i_stopped_chain = 0;
      return 0;
    } else {
      /* Queue looked empty: quiesce the workers and re-check, since a worker
       * mid-expansion may still push children. */
      omp_set_lock(&work_lock_turnstile);
      all_done = 0;
      while (!all_done) {
        omp_set_lock(&working_lock);
        for (int i = 0; i < NUM_THREADS-1; i++) {
          all_done |= working[i];
          if (all_done)
            break;
        }
        omp_unset_lock(&working_lock);
        if (all_done) {
          all_done = 0;
        } else {
          break;
        }
      }
      q_len = pq_length(shared_queue);
      omp_unset_lock(&work_lock_turnstile);
      if( q_len != 0 ) {
        /* Work appeared after quiescing: same break-the-chain path as above. */
        i_stopped_chain = 1;
        MPI_Send(&non_zero, 1, MPI_INT, next_rank, TERM_INIT_TAG, torus);
        MPI_Recv(&dummy, 1, MPI_INT, prev_rank, TERM_ROUND_2, torus, MPI_STATUS_IGNORE);
        i_stopped_chain = 0;
        return 0;
      } else {
        MPI_Send(&num_zero, 1, MPI_INT, next_rank, TERM_INIT_TAG, torus);
        /* ROUND 2 */
        MPI_Recv(&prev_q_len, 1, MPI_INT, prev_rank, TERM_ROUND_2, torus, MPI_STATUS_IGNORE);
        /* Quiesce again before sampling the queue for the running sum. */
        omp_set_lock(&work_lock_turnstile);
        all_done = 0;
        while (!all_done) {
          omp_set_lock(&working_lock);
          for (int i = 0; i < NUM_THREADS-1; i++) {
            all_done |= working[i];
            if (all_done)
              break;
          }
          omp_unset_lock(&working_lock);
          if (all_done) {
            all_done = 0;
          } else {
            break;
          }
        }
        add_len = pq_length(shared_queue) + prev_q_len;   // accumulate ring-wide total
        omp_unset_lock(&work_lock_turnstile);
        MPI_Send(&add_len, 1, MPI_INT, next_rank, TERM_ROUND_2, torus);
        if(add_len != 0) {
          return 0;   // someone (possibly us) has work; no kill token will matter for us
        } else {
          MPI_Recv(&kill_msg, 1, MPI_INT, prev_rank, TERM_KILL_TAG, torus, MPI_STATUS_IGNORE);
          if(kill_msg != 0) {
            MPI_Send(&kill_msg, 1, MPI_INT, next_rank, TERM_KILL_TAG, torus);
            return 0;   // decline: keep running
          } else {
            MPI_Send(&kill_msg, 1, MPI_INT, next_rank, TERM_KILL_TAG, torus);
            //TODO: Cleanup and exit
            return 1;   // confirmed: terminate
          }
        }
      }
    }
  }
}
|
rotlet_rsrc.c | #include "mex.h"
#include "math.h"
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define IDX prhs[2] // Source indeces
#define DIS prhs[3] // Distances
#define XI prhs[4] // Ewald Param
#define U plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
#define PI 3.141592653589793
/* c = a x b (3-D vector cross product); a, b, c are length-3 arrays.
 * `static` added: a file-scope plain `inline` definition in C99/C11 is an
 * inline definition only and emits no external symbol, so any call the
 * compiler chooses not to inline fails to link.  `static inline` gives the
 * function internal linkage and is the portable idiom for a single-TU MEX
 * file; callers inside this file are unaffected. */
static inline void cross(double * a,double * b, double *c)
{
  c[0] = a[1]*b[2] - a[2]*b[1];
  c[1] = a[2]*b[0] - a[0]*b[2];
  c[2] = a[0]*b[1] - a[1]*b[0];
}
/* MEX gateway: real-space (Ewald-split) rotlet sum.
 * For each target point m, sums A(r) * (f_n x r_mn) over the neighbours
 * listed in the rangesearch cell arrays IDX/DIS, where
 *   A(r) = ( erfc(xi*r)/r + 2*xi*exp(-xi^2 r^2)/sqrt(pi) ) / r^2.
 * Inputs (see #defines above): X = M-by-3 source/target locations,
 * F = M-by-3 strengths, IDX/DIS = per-point neighbour index/distance cells,
 * XI = Ewald parameter.  Output U = M-by-3 velocities. */
void
mexFunction( int nlhs, mxArray *plhs[],
             int nrhs, const mxArray *prhs[] )
{
  // input target
  const int M = mxGetM(X);   // NOTE(review): mxGetM returns size_t; int narrows for very large M
  const double xi = (double) mxGetScalar(XI);
  const double* restrict x = mxGetPr(X);
  const double* restrict f = mxGetPr(F);
  // output
  U = mxCreateDoubleMatrix(M, 3, mxREAL);
  double* restrict u = mxGetPr(U);
  if(VERBOSE)
    mexPrintf("[FS Rotlet Real space ] MEX N=%d ",M);
  // Loop through the cell
  // NOTE(review): mxGetCell/mxGetPr are called inside the parallel region;
  // the MEX API is not documented as thread-safe -- confirm this is
  // acceptable for read-only access on the target MATLAB version.
  #ifdef _OPENMP
  #pragma omp parallel for
  #endif
  for (int m=0; m<M; m++) {
    const mxArray * _IDX = mxGetCell(IDX, m);   // NOTE(review): _IDX/_DIS are reserved identifiers (underscore + capital)
    const mxArray * _DIS = mxGetCell(DIS, m);
    const int N= mxGetN(_IDX); // number of the source points in nblist
    double* idx= (double*) mxGetPr(_IDX);// index pointer of the source points in nblist
    double* dis= mxGetPr(_DIS); // distance pointer of the source points in nblist
    double p[3], fxr[3];       // accumulator and cross-product scratch
    p[0] = 0; p[1] = 0; p[2] = 0;
    // First element is the target itself; see MATLAB doc for rangesearch.
    for(int n = 1; n<N; n++){
      int idxn = (int) idx[n]-1;   // MATLAB 1-based index -> C 0-based
      double fn[] = {f[idxn], f[idxn+M], f[idxn+2*M]};               // column-major: strength of source idxn
      double rvec[] = {x[m]-x[idxn], x[m+M]-x[idxn+M],x[m+2*M]-x[idxn+2*M]};  // r = x_target - x_source
      double r = dis[n];
      double r2= r*r;
      cross(fn,rvec,fxr);
      /* Split-kernel scalar prefactor for the rotlet. */
      double A = ( erfc(xi*r)/r + 2.0*xi*exp(-xi*xi*r2)/sqrt(PI) )/r2;
      p[0] += A*fxr[0];
      p[1] += A*fxr[1];
      p[2] += A*fxr[2];
    }
    /* Column-major store of the 3-vector result for target m. */
    u[m    ] = p[0];
    u[m+  M] = p[1];
    u[m+2*M] = p[2];
  }
}
|
atomic_read_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
/* Global pairs used by the atomic-read tests below: *v is the plain
 * destination, *x is the atomically-read source, one pair per type. */
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
/* Bit-field layouts (plain and packed variants) exercising different
 * atomic access widths and offsets. */
struct BitFields {
  int : 32;
  int a : 31;
} bfx;
struct BitFields_packed {
  int : 32;
  int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
  int : 31;
  int a : 1;
} bfx2;
struct BitFields2_packed {
  int : 31;
  int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
  int : 11;
  int a : 14;
} bfx3;
struct BitFields3_packed {
  int : 11;
  int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
  short : 16;
  int a: 1;
  long b : 7;
} bfx4;
struct BitFields4_packed {
  short : 16;
  int a: 1;
  long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
// CHECK-LABEL: @main(
int main() {
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
bv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = cx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
ucv = ucx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
sv = sx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store i16
#pragma omp atomic read
usv = usx;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
iv = ix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = uix;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
lv = lx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ulv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = llx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
ullv = ullx;
// CHECK: load atomic i32, i32* bitcast (float* {{.*}} monotonic, align 4
// CHECK: bitcast i32 {{.*}} to float
// CHECK: store float
#pragma omp atomic read
fv = fx;
// CHECK: load atomic i64, i64* bitcast (double* {{.*}} monotonic, align 8
// CHECK: bitcast i64 {{.*}} to double
// CHECK: store double
#pragma omp atomic read
dv = dx;
// CHECK: [[LD:%.+]] = load atomic i128, i128* bitcast (x86_fp80* {{.*}} monotonic, align 16
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: store i128 [[LD]], i128* [[BITCAST]]
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* [[LDTEMP]]
// CHECK: store x86_fp80 [[LD]]
#pragma omp atomic read
ldv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = cix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = cfx;
// CHECK: call{{.*}} void @__atomic_load(i64 16,
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
// CHECK: store double
#pragma omp atomic seq_cst read
cdv = cdx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i8
#pragma omp atomic read
bv = ulx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i8
#pragma omp atomic read
cv = bx;
// CHECK: load atomic i8, i8* {{.*}} seq_cst, align 1
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i8
#pragma omp atomic read seq_cst
ucv = cx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
sv = ulx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i16
#pragma omp atomic read
usv = lx;
// CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store i32
#pragma omp atomic seq_cst, read
iv = uix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i32
#pragma omp atomic read
uiv = ix;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store i64
#pragma omp atomic read
lv = cix;
// CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
// CHECK: store i64
#pragma omp atomic read
ulv = fx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store i64
#pragma omp atomic read
llv = dx;
// CHECK: load atomic i128, i128* {{.*}} monotonic, align 16
// CHECK: store i64
#pragma omp atomic read
ullv = ldx;
// CHECK: call{{.*}} void @__atomic_load(i64 8,
// CHECK: store float
#pragma omp atomic read
fv = cix;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store double
#pragma omp atomic read
dv = sx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bx;
// CHECK: load atomic i8, i8* {{.*}} monotonic, align 1
// CHECK: store i32
// CHECK: store i32
#pragma omp atomic read
civ = bx;
// CHECK: load atomic i16, i16* {{.*}} monotonic, align 2
// CHECK: store float
// CHECK: store float
#pragma omp atomic read
cfv = usx;
// CHECK: load atomic i64, i64* {{.*}} monotonic, align 8
// CHECK: store double
// CHECK: store double
#pragma omp atomic read
cdv = llx;
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* @{{.+}} to i128*) monotonic, align 16
// CHECK: [[I128PTR:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[I128VAL]], i128* [[I128PTR]]
// CHECK: [[LD:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: extractelement <4 x i32> [[LD]]
// CHECK: store i8
#pragma omp atomic read
bv = int4x[0];
// CHECK: [[LD:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%{{.+}}* @{{.+}} to i8*), i64 4) to i32*) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @bfx_packed to i8*), i64 4), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 1
// CHECK: ashr i32 [[SHL]], 1
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @bfx2, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: ashr i32 [[LD]], 31
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @bfx2_packed to i8*), i64 3) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: ashr i8 [[LD]], 7
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx2_packed.a;
// CHECK: [[LD:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @bfx3, i32 0, i32 0) monotonic, align 4
// CHECK: store i32 [[LD]], i32* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i32 [[LD]], 7
// CHECK: ashr i32 [[SHL]], 18
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3.a;
// CHECK: [[LDTEMP_VOID_PTR:%.+]] = bitcast i24* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @bfx3_packed to i8*), i64 1), i8* [[LDTEMP_VOID_PTR]], i32 0)
// CHECK: [[LD:%.+]] = load i24, i24* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i24 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i24 [[SHL]], 10
// CHECK: sext i24 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx3_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 47
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 63
// CHECK: trunc i64 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic read
ldv = bfx4.a;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) monotonic, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i8 [[LD]], 7
// CHECK: [[ASHR:%.+]] = ashr i8 [[SHL]], 7
// CHECK: sext i8 [[ASHR]] to i32
// CHECK: store x86_fp80
#pragma omp atomic relaxed read
ldv = bfx4_packed.a;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @bfx4 to i64*) monotonic, align 8
// CHECK: store i64 [[LD]], i64* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[SHL:%.+]] = shl i64 [[LD]], 40
// CHECK: [[ASHR:%.+]] = ashr i64 [[SHL]], 57
// CHECK: store x86_fp80
#pragma omp atomic read relaxed
ldv = bfx4.b;
// CHECK: [[LD:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @bfx4_packed, i32 0, i32 0, i64 2) acquire, align 1
// CHECK: store i8 [[LD]], i8* [[LDTEMP:%.+]]
// CHECK: [[LD:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[ASHR:%.+]] = ashr i8 [[LD]], 1
// CHECK: sext i8 [[ASHR]] to i64
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store x86_fp80
#pragma omp atomic read acquire
ldv = bfx4_packed.b;
// CHECK: [[LD:%.+]] = load atomic i64, i64* bitcast (<2 x float>* @{{.+}} to i64*) monotonic, align 8
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[LD]], i64* [[BITCAST]]
// CHECK: [[LD:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: extractelement <2 x float> [[LD]]
// CHECK: store i64
#pragma omp atomic read
ulv = float2x.x;
// CHECK: call{{.*}} i{{[0-9]+}} @llvm.read_register
// CHECK: call{{.*}} @__kmpc_flush(
// CHECK: store double
#pragma omp atomic read seq_cst
dv = rix;
return 0;
}
#endif
|
/* $Header$ */
/* This single source file may be called as three separate executables:
ncra -- netCDF record averager
nces -- netCDF ensemble statistics
ncrcat -- netCDF record concatenator */
/* Purpose: Compute averages or extract series of specified hyperslabs of
   specified variables of multiple input netCDF files and output them
to a single file. */
/* Copyright (C) 1995--present Charlie Zender
This file is part of NCO, the netCDF Operators. NCO is free software.
You may redistribute and/or modify NCO under the terms of the
3-Clause BSD License.
You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
libraries and to distribute the resulting executables under the terms
of the BSD, but in addition obeying the extra stipulations of the
HDF, netCDF, OPeNDAP, and UDUnits licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the 3-Clause BSD License for more details.
The original author of this software, Charlie Zender, seeks to improve
it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to
Charlie Zender
Department of Earth System Science
University of California, Irvine
Irvine, CA 92697-3100 */
/* Usage:
ncra -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
ncra -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
ncra -n 3,4,1 -p /ZENDER/tmp -l ${HOME}/nco/data h0001.nc ~/foo.nc
scp ~/nco/src/nco/ncra.c esmf.ess.uci.edu:nco/src/nco
nces in.nc in.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc
nces -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc
nces -n 3,4,1 -p /ZENDER/tmp -l ${HOME} h0001.nc ~/foo.nc */
#ifdef HAVE_CONFIG_H
# include <config.h> /* Autotools tokens */
#endif /* !HAVE_CONFIG_H */
/* Standard C headers */
#include <assert.h> /* assert() debugging macro */
#include <math.h> /* sin cos cos sin 3.14159 */
#include <stdio.h> /* stderr, FILE, NULL, etc. */
#include <stdlib.h> /* abs, getopt, malloc, strtol */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#include <unistd.h> /* POSIX stuff */
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
# include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */
/* Internationalization i18n, Linux Journal 200211 p. 57--59 */
#ifdef I18N
#include <libintl.h> /* Internationalization i18n */
#include <locale.h> /* Locale setlocale() */
#define _(sng) gettext (sng)
#define gettext_noop(sng) (sng)
#define N_(sng) gettext_noop(sng)
#endif /* I18N */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */
/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
#include <mpi.h> /* MPI definitions */
#include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */
/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "libnco.h" /* netCDF Operator (NCO) library */
#ifdef ENABLE_MPI
void checkpointMpi(int prc_rnk, int stage){
int msg[]={0,0};
int rcd; /* [rcd] Return code */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
if(prc_rnk == rnk_mgr){
msg[0]=stage;
msg[1]=stage;
} /* endif */
(void)fprintf(fp_stderr,"%d checkpointing at stage %d\n",prc_rnk,stage);
/* make everyone continue from this point. */
rcd=MPI_Bcast(msg,2,MPI_INT,rnk_mgr,MPI_COMM_WORLD);
if(prc_rnk != rnk_mgr) {
/* basic sanity check */
assert(msg[0] == stage);
assert(msg[1] == stage);
} /* end if */
} /* end checkpointMpi() */
#endif /* !ENABLE_MPI */
int
main(int argc,char **argv)
{
char **fl_lst_abb=NULL; /* Option n */
char **fl_lst_in;
char **gaa_arg=NULL; /* [sng] Global attribute arguments */
char **var_lst_in=NULL_CEWI;
char *cmd_ln;
char *cnk_arg[NC_MAX_DIMS];
char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */
char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */
char *fl_in=NULL;
char *fl_out=NULL; /* Option o */
char *fl_out_tmp=NULL_CEWI;
char *fl_pth=NULL; /* Option p */
char *fl_pth_lcl=NULL; /* Option l */
char *lmt_arg[NC_MAX_DIMS];
char *nco_op_typ_sng=NULL_CEWI; /* [sng] Operation type Option y */
char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */
char *opt_crr=NULL; /* [sng] String representation of current long-option name */
char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */
char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */
const char * const CVS_Id="$Id$";
const char * const CVS_Revision="$Revision$";
const char * const opt_sht_lst="34567ACcD:d:FHhL:l:n:Oo:p:P:rRSt:v:xY:y:-:";
dmn_sct **dim;
dmn_sct **dmn_out;
extern char *optarg;
extern int optind;
/* Using naked stdin/stdout/stderr in parallel region generates warning
Copy appropriate filehandle to variable scoped shared in parallel clause */
FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */
int *in_id_arr;
int abb_arg_nbr=0;
int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */
int cnk_nbr=0; /* [nbr] Number of chunk sizes */
int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */
int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */
int fl_idx;
int fl_nbr=0;
int fl_in_fmt; /* [enm] Input file format */
int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */
int fll_md_old; /* [enm] Old fill mode */
int gaa_nbr=0; /* [nbr] Number of global attributes to add */
int idx=int_CEWI;
int in_id;
int lmt_nbr=0; /* Option d. NB: lmt_nbr gets incremented */
int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */
int md_open; /* [enm] Mode flag for nc_open() call */
int nbr_dmn_fl;
int nbr_dmn_xtr;
int nbr_var_fix; /* nbr_var_fix gets incremented */
int nbr_var_fl;
int nbr_var_prc; /* nbr_var_prc gets incremented */
int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */
int nco_op_typ=nco_op_avg; /* [enm] Default operation is averaging */
int nco_pck_plc=nco_pck_plc_nil; /* [enm] Default packing is none */
int opt;
int out_id;
int rcd=NC_NOERR; /* [rcd] Return code */
int rec_dmn_id=NCO_REC_DMN_UNDEFINED;
int thr_idx; /* [idx] Index of current thread */
int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */
int var_lst_in_nbr=0;
lmt_sct **lmt=NULL_CEWI;
lmt_sct *lmt_rec=NULL_CEWI;
lmt_all_sct **lmt_all_lst; /* List of *lmt_all structures */
lmt_all_sct *lmt_all_rec=NULL_CEWI; /* Pointer to record limit structure in above list */
long idx_rec; /* [idx] Index of current record in current input file */
long rec_usd_cml=0L; /* [idx] Index of current record in output file (0 is first, ...) */
nco_bool CNV_ARM;
cnv_sct *cnv; /* [sct] Convention structure */
nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */
nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */
nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */
nco_bool FL_RTR_RMT_LCN;
nco_bool FL_LST_IN_APPEND=True; /* Option H */
nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */
nco_bool FORCE_APPEND=False; /* Option A */
nco_bool FORCE_OVERWRITE=False; /* Option O */
nco_bool FORTRAN_IDX_CNV=False; /* Option F */
nco_bool HISTORY_APPEND=True; /* Option h */
nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */
nco_bool LAST_RECORD=False;
nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */
nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */
nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */
nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */
nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */
nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */
nco_bool flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
nco_int base_time_srt=nco_int_CEWI;
nco_int base_time_crr=nco_int_CEWI;
nm_id_sct *dmn_lst;
nm_id_sct *xtr_lst=NULL; /* xtr_lst may be alloc()'d from NULL with -c option */
size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */
size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */
size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */
size_t cnk_sz_byt=0UL; /* [B] Chunk size in bytes */
size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */
size_t hdr_pad=0UL; /* [B] Pad at end of header section */
var_sct **var;
var_sct **var_fix;
var_sct **var_fix_out;
var_sct **var_out=NULL_CEWI;
var_sct **var_prc;
var_sct **var_prc_out;
#ifdef ENABLE_MPI
/* Declare all MPI-specific variables here */
MPI_Status mpi_stt; /* [enm] Status check to decode msg_tag_typ */
nco_bool TKN_WRT_FREE=True; /* [flg] Write-access to output file is available */
int fl_nm_lng; /* [nbr] Output file name length */
int msg_bfr[msg_bfr_lng]; /* [bfr] Buffer containing var, idx, tkn_wrt_rsp */
int jdx=0; /* [idx] MPI index for local variables */
int lcl_idx_lst[60]; /* [arr] Array containing indices of variables processed at each Worker */
int lcl_nbr_var=0; /* [nbr] Count of variables processes at each Worker */
int msg_tag_typ; /* [enm] MPI message tag type */
int prc_rnk; /* [idx] Process rank */
int prc_nbr=0; /* [nbr] Number of MPI processes */
int tkn_wrt_rnk=0; /* [idx] Rank of process holding write token */
int tkn_wrt_rsp; /* [enm] Response to request for write token */
int var_wrt_nbr=0; /* [nbr] Variables written to output file until now */
int rnk_wrk; /* [idx] Worker rank */
int wrk_id_bfr[wrk_id_bfr_lng]; /* [bfr] Buffer for rnk_wrk */
#endif /* !ENABLE_MPI */
static struct option opt_lng[]={ /* Structure ordered by short option key if possible */
/* Long options with no argument, no short option counterpart */
{"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */
{"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */
{"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */
{"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */
{"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */
{"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
{"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
{"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */
{"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */
{"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */
{"version",no_argument,0,0},
{"vrs",no_argument,0,0},
/* Long options with argument, no short option counterpart */
{"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */
{"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */
{"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */
{"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */
{"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */
{"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */
{"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */
{"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */
{"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */
{"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */
{"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */
{"fl_fmt",required_argument,0,0},
{"file_format",required_argument,0,0},
{"gaa",required_argument,0,0}, /* [sng] Global attribute add */
{"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */
{"hdr_pad",required_argument,0,0},
{"header_pad",required_argument,0,0},
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
{"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */
/* Long options with short counterparts */
{"3",no_argument,0,'3'},
{"4",no_argument,0,'4'},
{"netcdf4",no_argument,0,'4'},
{"5",no_argument,0,'5'},
{"64bit_data",no_argument,0,'5'},
{"cdf5",no_argument,0,'5'},
{"pnetcdf",no_argument,0,'5'},
{"64bit_offset",no_argument,0,'6'},
{"7",no_argument,0,'7'},
{"append",no_argument,0,'A'},
{"coords",no_argument,0,'c'},
{"crd",no_argument,0,'c'},
{"xtr_ass_var",no_argument,0,'c'},
{"xcl_ass_var",no_argument,0,'C'},
{"no_coords",no_argument,0,'C'},
{"no_crd",no_argument,0,'C'},
{"dbg_lvl",required_argument,0,'D'},
{"debug",required_argument,0,'D'},
{"nco_dbg_lvl",required_argument,0,'D'},
{"dimension",required_argument,0,'d'},
{"dmn",required_argument,0,'d'},
{"fortran",no_argument,0,'F'},
{"ftn",no_argument,0,'F'},
{"fl_lst_in",no_argument,0,'H'},
{"file_list",no_argument,0,'H'},
{"history",no_argument,0,'h'},
{"hst",no_argument,0,'h'},
{"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */
{"deflate",required_argument,0,'L'}, /* [enm] Deflate level */
{"local",required_argument,0,'l'},
{"lcl",required_argument,0,'l'},
{"nintap",required_argument,0,'n'},
{"overwrite",no_argument,0,'O'},
{"ovr",no_argument,0,'O'},
{"output",required_argument,0,'o'},
{"fl_out",required_argument,0,'o'},
{"path",required_argument,0,'p'},
{"pack",required_argument,0,'P'},
{"retain",no_argument,0,'R'},
{"rtn",no_argument,0,'R'},
{"revision",no_argument,0,'r'},
{"suspend", no_argument,0,'S'},
{"thr_nbr",required_argument,0,'t'},
{"threads",required_argument,0,'t'},
{"omp_num_threads",required_argument,0,'t'},
{"variable",required_argument,0,'v'},
{"exclude",no_argument,0,'x'},
{"xcl",no_argument,0,'x'},
{"pseudonym",required_argument,0,'Y'},
{"program",required_argument,0,'Y'},
{"prg_nm",required_argument,0,'Y'},
{"math",required_argument,0,'y'},
{"help",no_argument,0,'?'},
{"hlp",no_argument,0,'?'},
{0,0,0,0}
}; /* end opt_lng */
int opt_idx=0; /* Index of current long option into opt_lng array */
#ifdef _LIBINTL_H
setlocale(LC_ALL,""); /* LC_ALL sets all localization tokens to same value */
bindtextdomain("nco","/home/zender/share/locale"); /* ${LOCALEDIR} is e.g., /usr/share/locale */
/* MO files should be in ${LOCALEDIR}/es/LC_MESSAGES */
textdomain("nco"); /* PACKAGE is name of program */
#endif /* not _LIBINTL_H */
#ifdef ENABLE_MPI
/* MPI Initialization */
MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&prc_nbr);
MPI_Comm_rank(MPI_COMM_WORLD,&prc_rnk);
#endif /* !ENABLE_MPI */
/* Start clock and save command line */
cmd_ln=nco_cmd_ln_sng(argc,argv);
/* Get program name and set program enum (e.g., nco_prg_id=ncra) */
nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id);
/* Parse command line arguments */
while(1){
/* getopt_long_only() allows one dash to prefix long options */
opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx);
/* NB: access to opt_crr is only valid when long_opt is detected */
if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */
opt_crr=(char *)strdup(opt_lng[opt_idx].name);
/* Process long options without short option counterparts */
if(opt == 0){
if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){
bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){
cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_byt */
if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){
cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk_min */
if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){
/* Copy limit argument for later processing */
cnk_arg[cnk_nbr]=(char *)strdup(optarg);
cnk_nbr++;
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){
cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){
/* Chunking map */
cnk_map_sng=(char *)strdup(optarg);
cnk_map=nco_cnk_map_get(cnk_map_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){
/* Chunking policy */
cnk_plc_sng=(char *)strdup(optarg);
cnk_plc=nco_cnk_plc_get(cnk_plc_sng);
} /* endif cnk */
if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */
if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt);
if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){
gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *));
gaa_arg[gaa_nbr++]=(char *)strdup(optarg);
} /* endif gaa */
if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){
hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
} /* endif "hdr_pad" */
if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){
log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
nc_set_log_level(log_lvl);
} /* !log_lvl */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */
if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
nco_exit(EXIT_SUCCESS);
} /* endif "vrs" */
if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True;
if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False;
} /* opt != 0 */
/* Process short options */
switch(opt){
case 0: /* Long options have already been processed, return */
break;
case '3': /* Request netCDF3 output storage format */
fl_out_fmt=NC_FORMAT_CLASSIC;
break;
case '4': /* Request netCDF4 output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4;
break;
case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */
fl_out_fmt=NC_FORMAT_CDF5;
break;
case '6': /* Request netCDF3 64-bit offset output storage format */
fl_out_fmt=NC_FORMAT_64BIT_OFFSET;
break;
case '7': /* Request netCDF4-classic output storage format */
fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC;
break;
case 'A': /* Toggle FORCE_APPEND */
FORCE_APPEND=!FORCE_APPEND;
break;
case 'C': /* Extract all coordinates associated with extracted variables? */
EXTRACT_ASSOCIATED_COORDINATES=False;
break;
case 'c':
EXTRACT_ALL_COORDINATES=True;
break;
case 'D': /* Debugging level. Default is 0. */
nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd);
break;
case 'd': /* Copy limit argument for later processing */
lmt_arg[lmt_nbr]=(char *)strdup(optarg);
lmt_nbr++;
break;
case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). */
FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV;
break;
case 'H': /* Toggle writing input file list attribute */
FL_LST_IN_APPEND=!FL_LST_IN_APPEND;
break;
case 'h': /* Toggle appending to history global attribute */
HISTORY_APPEND=!HISTORY_APPEND;
break;
case 'L': /* [enm] Deflate level. Default is 0. */
dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'l': /* Local path prefix for files retrieved from remote file system */
fl_pth_lcl=(char *)strdup(optarg);
break;
case 'n': /* NINTAP-style abbreviation of files to average */
fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr);
if(abb_arg_nbr < 1 || abb_arg_nbr > 6){
(void)fprintf(stdout,gettext("%s: ERROR Incorrect abbreviation for file list\n"),nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
} /* end if */
break;
case 'O': /* Toggle FORCE_OVERWRITE */
FORCE_OVERWRITE=!FORCE_OVERWRITE;
break;
case 'o': /* Name of output file */
fl_out=(char *)strdup(optarg);
break;
case 'p': /* Common file path */
fl_pth=(char *)strdup(optarg);
break;
case 'P': /* Packing policy */
nco_pck_plc_sng=(char *)strdup(optarg);
nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng);
break;
case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. */
RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC;
break;
case 'r': /* Print CVS program information and copyright notice */
(void)nco_vrs_prn(CVS_Id,CVS_Revision);
(void)nco_lbr_vrs_prn();
(void)nco_cpy_prn();
(void)nco_cnf_prn();
nco_exit(EXIT_SUCCESS);
break;
#ifdef ENABLE_MPI
case 'S': /* Suspend with signal handler to facilitate debugging */
if(signal(SIGUSR1,nco_cnt_run) == SIG_ERR) (void)fprintf(fp_stderr,"%s: ERROR Could not install suspend handler.\n",nco_prg_nm_get());
while(!nco_spn_lck_brk) usleep(nco_spn_lck_us); /* Spinlock. fxm: should probably insert a sched_yield */
break;
#endif /* !ENABLE_MPI */
case 't': /* Thread number */
thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10);
if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd);
break;
case 'v': /* Variables to extract/exclude */
/* Replace commas with hashes when within braces (convert back later) */
optarg_lcl=(char *)strdup(optarg);
(void)nco_rx_comma2hash(optarg_lcl);
var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr);
optarg_lcl=(char *)nco_free(optarg_lcl);
xtr_nbr=var_lst_in_nbr;
break;
case 'x': /* Exclude rather than extract variables specified with -v */
EXCLUDE_INPUT_LIST=True;
break;
case 'Y': /* Pseudonym */
/* Call nco_prg_prs to reset pseudonym */
optarg_lcl=(char *)strdup(optarg);
if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm);
nco_prg_nm=nco_prg_prs(optarg_lcl,&nco_prg_id);
optarg_lcl=(char *)nco_free(optarg_lcl);
break;
case 'y': /* Operation type */
nco_op_typ_sng=(char *)strdup(optarg);
if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge) nco_op_typ=nco_op_typ_get(nco_op_typ_sng);
break;
case '?': /* Print proper usage */
(void)nco_usg_prn();
nco_exit(EXIT_SUCCESS);
break;
case '-': /* Long options are not allowed */
(void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
break;
default: /* Print proper usage */
(void)fprintf(stdout,"%s ERROR in command-line syntax/options. Please reformulate command accordingly.\n",nco_prg_nm_get());
(void)nco_usg_prn();
nco_exit(EXIT_FAILURE);
break;
} /* end switch */
if(opt_crr) opt_crr=(char *)nco_free(opt_crr);
} /* end while loop */
/* Process positional arguments and fill-in filenames */
fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE);
/* Make uniform list of user-specified chunksizes */
if(cnk_nbr > 0) cnk_dmn=nco_cnk_prs(cnk_nbr,cnk_arg);
/* Make uniform list of user-specified dimension limits */
if(lmt_nbr > 0) lmt=nco_lmt_prs(lmt_nbr,lmt_arg);
/* Initialize thread information */
thr_nbr=nco_openmp_ini(thr_nbr);
in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int));
/* Parse filename */
fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
/* Make sure file is on local system and is readable or die trying */
fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
/* Open file using appropriate buffer size hints and verbosity */
if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE;
if(SHARE_OPEN) md_open=md_open|NC_SHARE;
rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id);
/* Get number of variables, dimensions, and record dimension ID of input file */
(void)nco_inq(in_id,&nbr_dmn_fl,&nbr_var_fl,(int *)NULL,&rec_dmn_id);
(void)nco_inq_format(in_id,&fl_in_fmt);
/* Form initial extraction list which may include extended regular expressions */
xtr_lst=nco_var_lst_mk(in_id,nbr_var_fl,var_lst_in,EXCLUDE_INPUT_LIST,EXTRACT_ALL_COORDINATES,&xtr_nbr);
/* Change included variables to excluded variables */
if(EXCLUDE_INPUT_LIST) xtr_lst=nco_var_lst_xcl(in_id,nbr_var_fl,xtr_lst,&xtr_nbr);
/* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */
cnv=nco_cnv_ini(in_id);
/* Add all coordinate variables to extraction list */
if(EXTRACT_ALL_COORDINATES) xtr_lst=nco_var_lst_crd_add(in_id,nbr_dmn_fl,nbr_var_fl,xtr_lst,&xtr_nbr,cnv);
/* Extract coordinates associated with extracted variables */
if(EXTRACT_ASSOCIATED_COORDINATES) xtr_lst=nco_var_lst_crd_ass_add(in_id,xtr_lst,&xtr_nbr,cnv);
/* Sort extraction list by variable ID for fastest I/O */
if(xtr_nbr > 1) xtr_lst=nco_lst_srt_nm_id(xtr_lst,xtr_nbr,False);
/* We now have final list of variables to extract. Phew. */
/* Find coordinate/dimension values associated with user-specified limits
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
for(idx=0;idx<lmt_nbr;idx++) (void)nco_lmt_evl(in_id,lmt[idx],0L,FORTRAN_IDX_CNV);
/* Place all dimensions in lmt_all_lst */
lmt_all_lst=(lmt_all_sct **)nco_malloc(nbr_dmn_fl*sizeof(lmt_all_sct *));
/* Initialize lmt_all_sct's */
(void)nco_msa_lmt_all_ntl(in_id,MSA_USR_RDR,lmt_all_lst,nbr_dmn_fl,lmt,lmt_nbr);
/* Find dimensions associated with variables to be extracted */
dmn_lst=nco_dmn_lst_ass_var(in_id,xtr_lst,xtr_nbr,&nbr_dmn_xtr);
/* Fill-in dimension structure for all extracted dimensions */
dim=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++) dim[idx]=nco_dmn_fll(in_id,dmn_lst[idx].id,dmn_lst[idx].nm);
/* Dimension list no longer needed */
dmn_lst=nco_nm_id_lst_free(dmn_lst,nbr_dmn_xtr);
/* Merge hyperslab limit information into dimension structures */
if(nbr_dmn_fl > 0) (void)nco_dmn_lmt_all_mrg(dmn_out,nbr_dmn_xtr,lmt_all_lst,nbr_dmn_fl);
/* Duplicate input dimension structures for output dimension structures */
dmn_out=(dmn_sct **)nco_malloc(nbr_dmn_xtr*sizeof(dmn_sct *));
for(idx=0;idx<nbr_dmn_xtr;idx++){
dmn_out[idx]=nco_dmn_dpl(dim[idx]);
(void)nco_dmn_xrf(dim[idx],dmn_out[idx]);
} /* end loop over idx */
/* Create stand-alone limit structure just for record dimension */
if(rec_dmn_id == NCO_REC_DMN_UNDEFINED){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
(void)fprintf(stdout,gettext("%s: ERROR input file %s lacks a record dimension\n"),nco_prg_nm_get(),fl_in);
if(fl_nbr == 1)(void)fprintf(stdout,gettext("%s: HINT Use ncks instead of %s\n"),nco_prg_nm_get(),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* endif */
}else{ /* Record dimension exists */
lmt_rec=nco_lmt_sct_mk(in_id,rec_dmn_id,lmt,lmt_nbr,FORTRAN_IDX_CNV);
/* Initialize record coordinate re-basing */
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
int var_id;
lmt_rec->cln_typ=cln_nil;
lmt_rec->origin=0.0;
lmt_rec->rbs_sng=NULL;
/* Obtain metadata for record coordinate */
rcd=nco_inq_varid_flg(in_id,lmt_rec->nm,&var_id);
if(rcd == NC_NOERR){
char *cln_att_sng=NULL;
lmt_rec->rbs_sng=nco_lmt_get_udu_att(in_id,var_id,"units");
cln_att_sng=nco_lmt_get_udu_att(in_id,var_id,"calendar");
lmt_rec->cln_typ=nco_cln_get_cln_typ(cln_att_sng);
if(cln_att_sng) cln_att_sng=(char*)nco_free(cln_att_sng);
}else{ /* endif record coordinate exists */
/* Record dimension, but not record coordinate, exists, which is fine. Reset return code. */
rcd=NC_NOERR;
} /* endif record coordinate exists */
} /* endif ncra, ncrcat */
} /* endif record dimension exists */
if(rec_dmn_id != NCO_REC_DMN_UNDEFINED){
for(idx=0;idx<nbr_dmn_fl;idx++){
if(!strcmp(lmt_rec->nm,lmt_all_lst[idx]->dmn_nm)){
lmt_all_rec=lmt_all_lst[idx];
/* Can only have one record limit */
if(lmt_all_rec->lmt_dmn_nbr > 1L){
(void)fprintf(stdout,"%s: Although this program allows multiple hyperslab limits for a single dimension, it allows only one unwrapped limit for the record dimension \"%s\". You have specified %i.\n",nco_prg_nm_get(),lmt_all_rec->dmn_nm,lmt_all_rec->lmt_dmn_nbr);
nco_exit(EXIT_FAILURE);
} /* end if */
if(nco_prg_id==ncra || nco_prg_id==ncrcat){
/* Change record dim in lmt_all_lst so that cnt=1 */
lmt_all_lst[idx]->dmn_cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srt=0L;
lmt_all_lst[idx]->lmt_dmn[0]->end=0L;
lmt_all_lst[idx]->lmt_dmn[0]->cnt=1L;
lmt_all_lst[idx]->lmt_dmn[0]->srd=1L;
} /* endif ncra || ncrcat */
break;
} /* endif current limit applies to record dimension */
} /* end loop over all dimensions */
} /* end if file has record dimension */
/* Is this an ARM-format data file? */
CNV_ARM=nco_cnv_arm_inq(in_id);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_srt=nco_cnv_arm_base_time_get(in_id);
/* Fill-in variable structure list for all extracted variables */
var=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *));
for(idx=0;idx<xtr_nbr;idx++){
var[idx]=nco_var_fll(in_id,xtr_lst[idx].id,xtr_lst[idx].nm,dim,nbr_dmn_xtr);
var_out[idx]=nco_var_dpl(var[idx]);
(void)nco_xrf_var(var[idx],var_out[idx]);
(void)nco_xrf_dmn(var_out[idx]);
} /* end loop over idx */
/* Extraction list no longer needed */
xtr_lst=nco_nm_id_lst_free(xtr_lst,xtr_nbr);
/* Divide variable lists into lists of fixed variables and variables to be processed */
(void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
#endif /* !ENABLE_MPI */
/* Make output and input files consanguinous */
if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt;
/* Verify output file format supports requested actions */
(void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl);
/* Open output file */
fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id);
/* Copy global attributes */
(void)nco_att_cpy(in_id,out_id,NC_GLOBAL,NC_GLOBAL,(nco_bool)True);
/* Catenate time-stamped command line to "history" global attribute */
if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln);
if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id);
if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr);
if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id);
/* Add input file list global attribute */
if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr);
#ifdef ENABLE_MPI
/* Initialize MPI task information */
if(prc_nbr > 0 && HISTORY_APPEND) (void)nco_mpi_att_cat(out_id,prc_nbr);
#endif /* !ENABLE_MPI */
if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr);
/* Define dimensions in output file */
(void)nco_dmn_dfn(fl_out,out_id,dmn_out,nbr_dmn_xtr);
/* Define variables in output file, copy their attributes */
(void)nco_var_dfn(in_id,fl_out,out_id,var_out,xtr_nbr,(dmn_sct **)NULL,(int)0,nco_pck_plc_nil,nco_pck_map_nil,dfl_lvl);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
(void)nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
/* Take output file out of define mode */
if(hdr_pad == 0UL){
(void)nco_enddef(out_id);
}else{
(void)nco__enddef(out_id,hdr_pad);
if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad);
} /* hdr_pad */
#ifdef ENABLE_MPI
} /* prc_rnk != rnk_mgr */
/* Manager obtains output filename and broadcasts to workers */
if(prc_rnk == rnk_mgr) fl_nm_lng=(int)strlen(fl_out_tmp);
MPI_Bcast(&fl_nm_lng,1,MPI_INT,0,MPI_COMM_WORLD);
if(prc_rnk != rnk_mgr) fl_out_tmp=(char *)nco_malloc((fl_nm_lng+1)*sizeof(char));
MPI_Bcast(fl_out_tmp,fl_nm_lng+1,MPI_CHAR,0,MPI_COMM_WORLD);
#endif /* !ENABLE_MPI */
/* Pre-processor token spaghetti here is necessary so that
1. UP/SMP/MPI codes all zero srt vectors before calling nco_var_val_cpy()
2. No codes zero srt vectors more than once */
/* Assign zero to start and unity to stride vectors in output variables */
(void)nco_var_srd_srt_set(var_out,xtr_nbr);
#ifdef ENABLE_MPI
if(prc_rnk == rnk_mgr){ /* MPI manager code */
TKN_WRT_FREE=False;
#endif /* !ENABLE_MPI */
/* Copy variable data for non-processed variables */
/* (void)nco_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix); */
(void)nco_msa_var_val_cpy(in_id,out_id,var_fix,nbr_var_fix,lmt_all_lst,nbr_dmn_fl);
#ifdef ENABLE_MPI
/* Close output file so workers can open it */
nco_close(out_id);
TKN_WRT_FREE=True;
} /* prc_rnk != rnk_mgr */
#else /* !ENABLE_MPI */
/* Close first input netCDF file (SMP only since MPI code immediate re-opens) */
(void)nco_close(in_id);
#endif /* !ENABLE_MPI */
/* Allocate and, if necessary, initialize accumulation space for processed variables */
for(idx=0;idx<nbr_var_prc;idx++){
if(nco_prg_id == ncra || nco_prg_id == ncrcat){
/* Allocate space for only one record */
var_prc_out[idx]->sz=var_prc[idx]->sz=var_prc[idx]->sz_rec;
} /* endif */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_malloc(var_prc_out[idx]->sz*sizeof(long int));
(void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally);
var_prc_out[idx]->val.vp=(void *)nco_malloc(var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type));
(void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val);
} /* end if */
} /* end loop over idx */
#ifdef ENABLE_MPI
/* NB: Only manager code manipulates value of TKN_WRT_FREE
Pass 1: Workers construct local persistent variable lists
Open first file
mpncra and mpncrcat process first record only
mpnces ingests complete file
Workers create local list of their variables
Pass 2: Complete record/file loops with local variable lists
Workers skip first timestep (mpncra/mpncrcat)
Workers process only variables in their local list from Pass 1
This variable persistence is necessary for mpncra and mpnces
since their workers must maintain running tallies for each variable.
Variable persistence is not necessary for mpncrcat
However, we do it anyway to keep mpncrcat and mpncra similar
mpncrcat writes records as it reads them and finishes after pass 2
Pass 3:
mpnces and mpncra require a final loop to normalize and write
Write-token for this loop is passed sequentially through the ranks */
/* Begin Pass 1: Workers construct local persistent variable lists */
fl_idx=0;
/* Variables may have different ID, missing_value, type, in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
idx_rec=lmt_rec->srt;
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
/* Process all variables in first record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive any message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
if(nco_prg_id == ncrcat) TKN_WRT_FREE=True; /* File written to at this point only for ncrcat */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
/* csz: fxm Workers do not need to know Master's out_id */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
}else if(msg_tag_typ == msg_tag_tkn_wrt_rqs && nco_prg_id == ncrcat){ /* msg_tag_typ != msg_tag_wrk_rqs */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
/* csz: fxm delete redundant statement with two lines further down */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
/* csz: fxm dangerous---workers must get and use their own out_id's, not master's out_id */
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass){
break;
}else{ /* idx != idx_all_wrk_ass */
/* Assign this variable to this worker for rest of program */
lcl_idx_lst[lcl_nbr_var]=idx;
/* csz: got to here reading logic */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* !ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
if(RAM_OPEN) md_open=NC_WRITE|NC_SHARE|NC_DISKLESS; else md_open=NC_WRITE|NC_SHARE;
rcd=nco_fl_open(fl_out_tmp,md_open,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
if(var_prc_out[idx]->sz_rec > 1L) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to unpacked type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
} /* endif Worker */
printf("DEBUG: End of first pass of ncra/ncrcat at node %d\n",prc_rnk);
/* End of ncra, ncrcat section */
}else{ /* ncfe */
if(prc_rnk == rnk_mgr){ /* MPI manager code */
/* Compensate for incrementing on each worker's first message */
var_wrt_nbr=-prc_nbr+1;
idx=0;
/* While variables remain to be processed or written... */
while(var_wrt_nbr < nbr_var_prc){
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
/* Allocate next variable, if any, to worker */
if(msg_tag_typ == msg_tag_wrk_rqs){
var_wrt_nbr++; /* [nbr] Number of variables written */
/* Worker closed output file before sending msg_tag_wrk_rqs */
/* TKN_WRT_FREE=True; ncfe does not do file write here */
if(idx > nbr_var_prc-1){
msg_bfr[0]=idx_all_wrk_ass; /* [enm] All variables already assigned */
msg_bfr[1]=out_id; /* Output file ID */
}else{
/* Tell requesting worker to allocate space for next variable */
msg_bfr[0]=idx; /* [idx] Variable to be processed */
msg_bfr[1]=out_id; /* Output file ID */
msg_bfr[2]=var_prc_out[idx]->id; /* [id] Variable ID in output file */
/* Point to next variable on list */
idx++;
} /* endif idx */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_wrk_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_wrk_rqs */
} /* end while var_wrt_nbr < nbr_var_prc */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
while(1){ /* While work remains... */
/* Send msg_tag_wrk_rqs */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rqs,MPI_COMM_WORLD);
/* Receive msg_tag_wrk_rsp */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_rsp,MPI_COMM_WORLD,&mpi_stt);
idx=msg_bfr[0];
out_id=msg_bfr[1];
if(idx == idx_all_wrk_ass) break;
else{
lcl_idx_lst[lcl_nbr_var]=idx; /* storing the indices for subsequent processing by the worker */
lcl_nbr_var++;
var_prc_out[idx]->id=msg_bfr[2];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* !idx_all_wrk_ass */
} /* while(1) loop requesting work/token in Worker */
} /* endif Worker */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
nco_close(in_id);
#ifdef ENABLE_MPI
/* This barrier ensures that all nodes have reached this point together.
Otherwise, the manager code should be altered so it can deal with
nodes in different stages of execution at any time.
Daniel: I think we should be convinced of this parallelization
structure before bothering with implementing the code restructuring in
the manager that would let us remove the barrier. The barrier
should only negligibly impact performance. */
checkpointMpi(prc_rnk, 1);
#endif /* !ENABLE_MPI */
/* End Pass 1: Workers construct local persistent variable lists */
printf("DEBUG: prc_rnk %d is done with 1st pass\n",prc_rnk);
/* Begin Pass 2: Complete record/file loops with local variable lists */
#endif /* !ENABLE_MPI */
/* Loop over input files */
for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){
/* Parse filename */
if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("\nInput file %d is %s; "),fl_idx,fl_in);
/* Make sure file is on local system and is readable or die trying */
if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN);
if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("local file %s:\n"),fl_in);
/* Open file once per thread to improve caching */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx);
in_id=in_id_arr[0];
#ifdef ENABLE_MPI
printf("DEBUG: input file opened in prc_rnk %d inside the loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Variables may have different IDs and missing_values in each file */
for(idx=0;idx<nbr_var_prc;idx++) (void)nco_var_mtd_refresh(in_id,var_prc[idx]);
/* Each file can have a different number of records to process
NB: nco_lmt_evl() with same nc_id contains OpenMP critical region */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) (void)nco_lmt_evl(in_id,lmt_rec,rec_usd_cml,FORTRAN_IDX_CNV);
/* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */
if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id);
/* Perform various error-checks on input file */
if(False) (void)nco_fl_cmp_err_chk();
if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe jumps to else branch */
/* Loop over each record in current file */
if(nco_dbg_lvl >= nco_dbg_std && lmt_rec->srt > lmt_rec->end) (void)fprintf(stdout,gettext("%s: WARNING %s (input file %d) is superfluous\n"),nco_prg_nm_get(),fl_in,fl_idx);
for(idx_rec=lmt_rec->srt;idx_rec<=lmt_rec->end;idx_rec+=lmt_rec->srd){
if(fl_idx == fl_nbr-1 && idx_rec >= 1L+lmt_rec->end-lmt_rec->srd) LAST_RECORD=True;
#ifdef ENABLE_MPI
if(fl_idx == 0 && idx_rec == lmt_rec->srt){
/* MPI operators processed first record in first-stage loop */
continue;
}else{ /* a loop of idx = stored indices */
if(prc_rnk == rnk_mgr){ /* For ncrcat, Manager gives write access for each record in each file */
if(nco_prg_id == ncrcat){ /* Give Write access to write current record */
/* var_wrt_nbr=-prc_nbr+1; */
var_wrt_nbr=0;
while(var_wrt_nbr < nbr_var_prc){ /* Give write access to Workers who have some variables; wrong condn? */
/* Receive message from any worker */
MPI_Recv(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,MPI_ANY_SOURCE,MPI_ANY_TAG,MPI_COMM_WORLD,&mpi_stt);
/* Obtain MPI message tag type */
msg_tag_typ=mpi_stt.MPI_TAG;
/* Get sender's prc_rnk */
rnk_wrk=wrk_id_bfr[0];
if(msg_tag_typ == msg_tag_wrk_done) TKN_WRT_FREE=True;
if(msg_tag_typ == msg_tag_tkn_wrt_rqs){
if(rnk_wrk == tkn_wrt_rnk){ /* Prev write completed */
TKN_WRT_FREE=True;
} /* rnk_wrk != tkn_wrt_rnk */
/* Allocate token if free, else ask worker to try later */
if(TKN_WRT_FREE){
TKN_WRT_FREE=False;
msg_bfr[0]=tkn_wrt_rqs_xcp; /* Accept request for write token */
tkn_wrt_rnk=rnk_wrk; /* To track who has the token */
var_wrt_nbr++;
}else{
msg_bfr[0]=tkn_wrt_rqs_dny; /* Deny request for write token */
} /* !TKN_WRT_FREE */
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_wrk,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* msg_tag_typ != msg_tag_tkn_wrt_rqs */
} /* End-while token request loop */
} /* !ncrcat */
}else{ /* prc_rnk != rnk_mgr, end Manager code begin Worker code */
wrk_id_bfr[0]=prc_rnk;
var_wrt_nbr=0;
/* if(fl_idx == 0 && idx_rec == lmt_rec->srt) continue;
else a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#endif /* !ENABLE_MPI */
/* Process all variables in current record */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,gettext("Record %ld of %s is output record %ld\n"),idx_rec,fl_in,rec_usd_cml);
#if 0
/* NB: Immediately preceding MPI for scope confounds Emacs indentation
Fake end scope restores correct indentation, simplifies code-checking */
} /* fake end for */
#endif /* !0 */
#ifndef ENABLE_MPI
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(CNV_ARM,base_time_crr,base_time_srt,nco_dbg_lvl,fl_in,fl_out,idx_rec,rec_usd_cml,in_id_arr,LAST_RECORD,nbr_var_prc,nco_op_typ,out_id,prg,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
/* UP and SMP codes main loop over variables */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Update hyperslab start indices to current record for each variable */
var_prc[idx]->srt[0]=idx_rec;
var_prc[idx]->end[0]=idx_rec;
var_prc[idx]->cnt[0]=1L;
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
if(nco_prg_id == ncra){
/* Convert char, short, long, int, and float types to doubles before arithmetic */
var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ);
/* Output variable type is "sticky" so only convert on first record */
if(rec_usd_cml == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */
nco_opr_drv(rec_usd_cml,nco_op_typ,var_prc[idx],var_prc_out[idx]);
} /* end if ncra */
/* Append current record to output file */
if(nco_prg_id == ncrcat){
var_prc_out[idx]->srt[0]=var_prc_out[idx]->end[0]=rec_usd_cml;
var_prc_out[idx]->cnt[0]=1L;
/* Replace this time_offset value with time_offset from initial file base_time */
if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt);
#ifdef ENABLE_MPI
/* Obtain token and prepare to write */
while(1){ /* Send msg_tag_tkn_wrt_rqs repeatedly until token obtained */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rqs,MPI_COMM_WORLD);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
tkn_wrt_rsp=msg_bfr[0];
/* Wait then re-send request */
if(tkn_wrt_rsp == tkn_wrt_rqs_dny) sleep(tkn_wrt_rqs_ntv); else break;
} /* end while loop waiting for write token */
/* Worker has token---prepare to write */
if(tkn_wrt_rsp == tkn_wrt_rqs_xcp){
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
/* Set chunksize parameters */
if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) (void)nco_cnk_sz_set(out_id,lmt_all_lst,nbr_dmn_fl,&cnk_map,&cnk_plc,cnk_sz_scl,cnk_dmn,cnk_nbr);
/* Turn-off default filling behavior to enhance efficiency */
nco_set_fill(out_id,NC_NOFILL,&fll_md_old);
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp critical
#endif /* _OPENMP */
#endif /* !ENABLE_MPI */
if(var_prc_out[idx]->sz_rec > 1) (void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
else (void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type);
#ifdef ENABLE_MPI
/* Close output file and increment written counter */
nco_close(out_id);
var_wrt_nbr++;
} /* endif tkn_wrt_rqs_xcp */
#endif /* !ENABLE_MPI */
} /* end if ncrcat */
/* Make sure record coordinate, if any, is monotonic */
if(nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) (void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec,rec_usd_cml);
/* Convert missing_value, if any, back to unpacked type */
if(var_prc[idx]->has_mss_val && var_prc[idx]->type != var_prc[idx]->typ_upk && !LAST_RECORD)
var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP Parallel for) loop over variables */
#ifdef ENABLE_MPI
if(nco_prg_id == ncrcat){
/* Return token after writing record's last variable */
wrk_id_bfr[0]=prc_rnk;
MPI_Send(wrk_id_bfr,wrk_id_bfr_lng,MPI_INT,rnk_mgr,msg_tag_wrk_done,MPI_COMM_WORLD);
} /* !ncrcat */
#endif /* !ENABLE_MPI */
rec_usd_cml++; /* [idx] Index of current record in output file (0 is first, ...) */
if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(stderr,"\n");
#ifdef ENABLE_MPI
} /* !Worker */
} /* end else ! fl_idx=0,idx_rec=srt */
#endif /* !ENABLE_MPI */
} /* end loop over idx_rec */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker */
#endif /* !ENABLE_MPI */
/* Warn if fewer than number of requested records were read and final file has been processed */
if(lmt_rec->lmt_typ == lmt_dmn_idx && lmt_rec->is_usr_spc_min && lmt_rec->is_usr_spc_max){
long rec_nbr_rqs; /* Number of records user requested */
rec_nbr_rqs=1L+(lmt_rec->max_idx-lmt_rec->min_idx)/lmt_rec->srd;
if(nco_dbg_lvl >= nco_dbg_std && fl_idx == fl_nbr-1 && rec_nbr_rqs != rec_usd_cml) (void)fprintf(stdout,gettext("%s: WARNING User requested %li records but only %li were found\n"),nco_prg_nm_get(),rec_nbr_rqs,rec_usd_cml);
} /* end if */
/* Error if no records were read and final file has been processed */
if(rec_usd_cml <= 0 && fl_idx == fl_nbr-1){
(void)fprintf(stdout,gettext("%s: ERROR No records lay within specified hyperslab\n"),nco_prg_nm_get());
nco_exit(EXIT_FAILURE);
} /* end if */
#ifdef ENABLE_MPI
} /* !Worker */
printf("DEBUG: prc_rnk %d at the end of ncra/rcat\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* End of ncra, ncrcat section */
}else{ /* ncfe */
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only Worker does the ncfe processing */
if(fl_idx == 0){
continue;
}else{ /* a loop of idx = stored indices */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx,in_id) shared(nco_dbg_lvl,fl_idx,in_id_arr,nbr_var_prc,nco_op_typ,rcd,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */
#endif /* !ENABLE_MPI */
in_id=in_id_arr[omp_get_thread_num()];
if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm);
if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr);
/* Retrieve variable from disk into memory */
/* NB: nco_var_get() with same nc_id contains OpenMP critical region */
(void)nco_var_get(in_id,var_prc[idx]);
/* Convert char, short, long, int, and float types to doubles before arithmetic */
/* var_prc[idx]=nco_typ_cnv_rth(var_prc[idx],nco_op_typ); */
/* Output variable type is "sticky" so only convert on first record */
if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ);
/* Convert var_prc to type of var_prc_out in case type of variable on disk has changed */
var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]);
/* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */
nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]);
/* Free current input buffer */
var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp);
} /* end (OpenMP parallel for) loop over idx */
#ifdef ENABLE_MPI
} /* end else !fl_idx=0 */
} /* !Worker */
#endif /* !ENABLE_MPI */
} /* end else ncfe */
if(nco_dbg_lvl > nco_dbg_scl) (void)fprintf(stderr,"\n");
/* Close input netCDF file */
for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]);
/* Dispose local copy of file */
if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in);
} /* end loop over fl_idx */
#ifdef ENABLE_MPI
printf("DEBUG: prc_rnk %d is out of file idx loop\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Normalize, multiply, etc where necessary */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
#ifdef ENABLE_MPI
if(prc_rnk != rnk_mgr){ /* Only workers have indices of variables to process */
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
#if 0
/* NB: Immediately preceding MPI if/for scopes confound Emacs indentation
Fake end scopes restore correct indentation, simplify code-checking */
} /* fake end for */
} /* fake end if */
#endif /* !0 */
#else /* !ENABLE_MPI */
#ifdef _OPENMP
#pragma omp parallel for default(none) private(idx) shared(nbr_var_prc,nco_op_typ,var_prc,var_prc_out)
#endif /* !_OPENMP */
for(idx=0;idx<nbr_var_prc;idx++){
#endif /* !ENABLE_MPI */
if(var_prc[idx]->is_crd_var){
/* Return linear averages of coordinates unless computing extrema
Prevent coordinate variables from encountering nco_var_nrm_sdn() */
if((nco_op_typ != nco_op_min) && (nco_op_typ != nco_op_max)) (void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
}else{ /* !var_prc[idx]->is_crd_var */
switch(nco_op_typ){
case nco_op_avg: /* Normalize sum by tally to create mean */
case nco_op_sqrt: /* Normalize sum by tally to create mean */
case nco_op_sqravg: /* Normalize sum by tally to create mean */
case nco_op_rms: /* Normalize sum of squares by tally to create mean square */
case nco_op_avgsqr: /* Normalize sum of squares by tally to create mean square */
(void)nco_var_nrm(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_rmssdn: /* Normalize sum of squares by tally-1 to create mean square for sdn */
(void)nco_var_nrm_sdn(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
break;
case nco_op_min: /* Minimum is already in buffer, do nothing */
case nco_op_max: /* Maximum is already in buffer, do nothing */
case nco_op_ttl: /* Total is already in buffer, stuff missing values into elements with zero tally */
(void)nco_var_tll_zro_mss_val(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val);
default:
break;
} /* end switch */
/* Some operations require additional processing */
switch(nco_op_typ){
case nco_op_rms: /* Take root of mean of sum of squares to create root mean square */
case nco_op_rmssdn: /* Take root of sdn mean of sum of squares to create root mean square for sdn */
case nco_op_sqrt: /* Take root of mean to create root mean */
(void)nco_var_sqrt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->tally,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
case nco_op_sqravg: /* Square mean to create square of the mean (for sdn) */
(void)nco_var_mlt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,var_prc_out[idx]->val);
break;
default:
break;
} /* end switch */
} /* !var_prc[idx]->is_crd_var */
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
} /* end (OpenMP parallel for) loop over variables */
#ifdef ENABLE_MPI
printf("DEBUG: End of Normzn at prc_rnk %d\n",prc_rnk);
} /* prc_rnk == rnk_mgr */
for(idx = 0; idx < nbr_var_prc; idx++) {
assert(var_prc_out[idx]->tally == var_prc[idx]->tally);
if (var_prc_out[idx]->tally == 0) continue;
printf("DEBUG: node %d reset idx %d tally for var_prc(out) (cleanup)\n",prc_rnk,idx);
var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally);
}
printf("DEBUG: Mgr shud prnt this too, prc_rnk %d\n",prc_rnk);
#endif /* !ENABLE_MPI */
} /* !ncra/ncfe */
#ifdef ENABLE_MPI
printf("DEBUG: After all processing; Before barrier, prc_rnk %d\n",prc_rnk);
if(prc_rnk == rnk_mgr){ /* Only Manager */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened out file\n",prc_rnk);
#endif /* !ENABLE_MPI */
/* Manually fix YYMMDD date which was mangled by averaging */
if(cnv->CCM_CCSM_CF && nco_prg_id == ncra) (void)nco_cnv_ccm_ccsm_cf_date(out_id,var_out,xtr_nbr);
/* End Pass 2: Complete record/file loops with local variable lists */
/* Begin Pass 3: */
/* End Pass 3: */
/* Add time variable to output file
NB: nco_cnv_arm_time_install() contains OpenMP critical region */
if(CNV_ARM && nco_prg_id == ncrcat) (void)nco_cnv_arm_time_install(out_id,base_time_srt,dfl_lvl);
#ifdef ENABLE_MPI
nco_close(out_id);
printf("DEBUG: Mgr prc_rnk %d closed out file %d after fixing date, time \n", prc_rnk, out_id);
MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
printf("DEBUG: Mgr sent token to worker 1 for final write\n");
}else{ /* Workers */
printf("DEBUG: prc_rnk %d waiting for msg from Mgr for final write\n",prc_rnk);
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
printf("DEBUG: prc_rnk %d got token for final write to %d\n",prc_rnk, out_id);
if(nco_prg_id == ncra || nco_prg_id == ncfe){
/* Copy averages to output file and free averaging buffers */
rcd=nco_fl_open(fl_out_tmp,NC_WRITE|NC_SHARE,&bfr_sz_hnt,&out_id);
printf("DEBUG: prc_rnk %d opened output file for final write\n",prc_rnk);
for(jdx=0;jdx<lcl_nbr_var;jdx++){
idx=lcl_idx_lst[jdx];
/* Revert any arithmetic promotion but leave unpacked (for now) */
/* printf("DEBUG: Before nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* printf("DEBUG: After nco_var_cnf_typ prc_rnk %d var val %f\n",prc_rnk,var_prc_out[idx]->val.ip[0]); */
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
printf("DEBUG: prc_rnk %d to final write var %s with idx %d val %g\n",prc_rnk,var_prc_out[idx]->nm,idx,var_prc_out[idx]->val.fp[0]);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is one in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over jdx */
/* Close output file */
nco_close(out_id);
printf("DEBUG: prc_rnk %d closed out file after writing\n",prc_rnk);
/* Send Token to Manager */
} /* end if */
if(prc_rnk == prc_nbr-1) MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,rnk_mgr,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD); else MPI_Send(msg_bfr,msg_bfr_lng,MPI_INT,prc_rnk+1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD);
} /* !Workers */
if(prc_rnk == rnk_mgr){ /* Only Manager */
MPI_Recv(msg_bfr,msg_bfr_lng,MPI_INT,prc_nbr-1,msg_tag_tkn_wrt_rsp,MPI_COMM_WORLD,&mpi_stt);
(void)nco_fl_mv(fl_out_tmp,fl_out);
} /* !Manager */
MPI_Finalize();
#else /* !ENABLE_MPI */
/* Copy averages to output file and free averaging buffers */
if(nco_prg_id == ncra || nco_prg_id == ncfe){
for(idx=0;idx<nbr_var_prc;idx++){
/* Revert any arithmetic promotion but leave unpacked (for now) */
var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]);
/* Packing/Unpacking */
if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(out_id,var_prc_out[idx],nco_pck_plc);
if(var_prc_out[idx]->nbr_dim == 0){
(void)nco_put_var1(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
}else{ /* end if variable is scalar */
/* Size of record dimension is 1 in output file */
if(nco_prg_id == ncra) var_prc_out[idx]->cnt[0]=1L;
(void)nco_put_vara(out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type);
} /* end if variable is an array */
var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp);
} /* end loop over idx */
} /* end if */
/* Close output file and move it from temporary to permanent location */
(void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id);
#endif /* !ENABLE_MPI */
/* Clean memory unless dirty memory allowed */
if(flg_mmr_cln){
/* ncra-specific memory cleanup */
if(nco_prg_id == ncra || nco_prg_id == ncrcat) lmt_rec=nco_lmt_free(lmt_rec);
/* NCO-generic clean-up */
/* Free individual strings/arrays */
if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln);
if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng);
if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng);
if(fl_in) fl_in=(char *)nco_free(fl_in);
if(fl_out) fl_out=(char *)nco_free(fl_out);
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp);
if(fl_pth) fl_pth=(char *)nco_free(fl_pth);
if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl);
if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr);
/* Free lists of strings */
if(fl_lst_in && fl_lst_abb == NULL) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr);
if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1);
if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr);
if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr);
if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr);
/* Free limits */
for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]);
if(lmt_nbr > 0) lmt=nco_lmt_lst_free(lmt,lmt_nbr);
/* Free chunking information */
for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]);
if(cnk_nbr > 0) cnk_dmn=nco_cnk_lst_free(cnk_dmn,cnk_nbr);
/* Free dimension lists */
if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr);
if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr);
#if 1
/* Free variable lists */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr);
var_prc=(var_sct **)nco_free(var_prc);
var_prc_out=(var_sct **)nco_free(var_prc_out);
var_fix=(var_sct **)nco_free(var_fix);
var_fix_out=(var_sct **)nco_free(var_fix_out);
#endif /* !1 */
#if 0
/* 20051027: Try ncwa free()'ing technique to avoid freeing dangling pointers */
if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr);
/* ncwa uses nco_var_lst_free() on var_prc_out because var_out has dangling pointers */
if(nbr_var_fix > 0) var_fix_out=nco_var_lst_free(var_fix_out,nbr_var_fix);
if(nbr_var_prc > 0) var_prc_out=nco_var_lst_free(var_prc_out,nbr_var_prc);
var_prc=(var_sct **)nco_free(var_prc);
var_fix=(var_sct **)nco_free(var_fix);
var_out=(var_sct **)nco_free(var_out);
#endif /* !0 */
} /* !flg_mmr_cln */
nco_exit_gracefully();
return EXIT_SUCCESS;
} /* end main() */
|
private.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Demonstrates the OpenMP private(x) clause: each thread gets its own
 * copy of x (uninitialized until assigned), and the master's x is left
 * untouched after the parallel region.
 * argv[1]: number of threads to spawn. */
int main(int argc, char *argv[]){
  /* Fix: the original read argv[1] unconditionally; with no argument,
   * strtol(NULL,...) is undefined behavior. */
  if(argc < 2){
    fprintf(stderr,"Uso: %s <numero de hilos>\n",argv[0]);
    return EXIT_FAILURE;
  }
  int numeroDeHilo=(int)strtol(argv[1],NULL,10);
  if(numeroDeHilo <= 0){
    fprintf(stderr,"Uso: %s <numero de hilos> (debe ser > 0)\n",argv[0]);
    return EXIT_FAILURE;
  }
  int x=5;
#pragma omp parallel num_threads(numeroDeHilo) private(x)
  {
    int my_rank=omp_get_thread_num();
    /* x here is the thread-private copy, not the x initialized to 5 */
    x=my_rank*2+2;
    printf("Este es el valor de x=%d en el hilo %d después de la operación\n",x,my_rank);
  }
  /* private(x) never copies back: the shared x still holds 5 */
  printf("Este es el valor de x=%d en el hilo principal\n",x);
  return 0;
}
GB_unaryop__lnot_fp64_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_fp64_uint16
// op(A') function: GB_tran__lnot_fp64_uint16
// C type: double
// A type: uint16_t
// cast: double cij = (double) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
double z = (double) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !((double) Ax [p] != 0): element-wise logical NOT with a
// uint16 -> double typecast, over anz entries, using a static OpenMP
// schedule. (Auto-generated file: do not hand-edit the logic.)
GrB_Info GB_unop__lnot_fp64_uint16
(
double *Cx, // Cx and Ax may be aliased
uint16_t *Ax,
int64_t anz, // number of entries in Ax/Cx
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was compiled out; caller uses the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = !((double) Ax [p] != 0), via GB_GETA/GB_CASTING/GB_OP
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint16 -> double, and apply
// logical NOT. The loop body lives in GB_unaryop_transpose.c and is
// specialized through the GB_* macros defined earlier in this
// (auto-generated) file.
GrB_Info GB_tran__lnot_fp64_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // per-slice counters used by the transpose template
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice // number of slices of A processed in parallel
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
atomic-2.c | /* { dg-do run } */
/* { dg-additional-options "-mcx16" { target { { i?86-*-* x86_64-*-* } && lp64 } } } */
#ifdef __x86_64__
#include "cpuid.h"
#endif
double d = 1.5;
long double ld = 3;
extern void abort (void);
/* Exercise '#pragma omp atomic' update forms on double and long double,
   then verify the results (both exactly representable: 1.5*1.25 = 1.875
   and 3/0.75 = 4). NB: the statements must keep the 'x binop= expr'
   shape required by the atomic construct. */
void
test (void)
{
#pragma omp atomic
d *= 1.25; /* d: 1.5 -> 1.875, atomically */
#pragma omp atomic
ld /= 0.75; /* ld: 3 -> 4, atomically */
if (d != 1.875 || ld != 4.0L)
abort ();
}
int
main (void)
{
#ifdef __x86_64__
  /* The 16-byte atomics needed for long double on x86_64 require
     cmpxchg16b; skip the test on CPUs (or when cpuid fails) without it. */
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx)
      || !(ecx & bit_CMPXCHG16B))
    return 0;
#endif
  test ();
  return 0;
}
|
16_omp_reduction.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | %filecheck %s
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %opt -O2 -S | %apply-typeart -typeart-alloca -call-filter -S | %filecheck %s --check-prefix=check-inst
// REQUIRES: openmp
// clang-format on
extern void MPI_send(void*);
// Sum the first n elements of a, accumulating through an OpenMP '+'
// reduction so each thread folds its partial sum into the result.
float sum(const float* a, int n) {
  float acc = 0.;
#pragma omp parallel for reduction(+ : acc)
  for (int idx = 0; idx < n; ++idx) {
    acc += a[idx];
  }
  return acc;
}
// Driver for the lit test above: 'loc' is stack-allocated and its address
// escapes via MPI_send, so the instrumentation pass must track it — the
// check-inst lines below match the expected IR (do not alter them or the
// variable names; -fno-discard-value-names makes them significant).
void foo() {
  const int n = 10;
  float array[n] = {0};
  // check-inst: define {{.*}} @foo
  // check-inst: %loc = alloca
  // check-inst: %0 = bitcast float* %loc to i8*
  // check-inst: call void @__typeart_alloc_stack(i8* %0, i32 5, i64 1)
  // check-inst-not: __typeart_alloc_stack_omp
  float loc = sum(array, n);
  MPI_send((void*)&loc);
}
// CHECK: TypeArtPass [Heap & Stack]
// CHECK: Malloc : 0
// CHECK: Free : 0
// CHECK: Alloca : 1
// CHECK: Global : 0
|
sigmoid_kernel_arm.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: haitao@openailab.com
*/
#include "sigmoid_kernel_arm.h"
#include <math.h>
#include <arm_neon.h>
#define SIGMOID_MAX(a, b) ((a) > (b) ? (a) : (b))
#define SIGMOID_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Schraudolph-style exp approximation: construct the IEEE-754 bit pattern
 * of exp(x) directly from a linear map of x into the exponent field.
 * NOTE(review): for large |x| the double -> uint32_t conversion is out of
 * range (undefined behavior) — callers must keep x in a sane window. */
static inline float fast_exp(float x)
{
    union
    {
        uint32_t i;
        float f;
    } bits;
    bits.i = (1 << 23) * (1.4426950409 * x + 126.93490512f);
    return bits.f;
}
static float fast_exp1(float x)
{
volatile union
{
float f;
unsigned int i;
} cvt;
/* exp(x) = 2^i * 2^f; i = floor (log2(e) * x), 0 <= f <= 1 */
float t = x * 1.442695041f;
float fi = floorf(t);
float f = t - fi;
int i = ( int )fi;
cvt.f = (0.3371894346f * f + 0.657636276f) * f + 1.00172476f; /* compute 2^f */
cvt.i += (i << 23); /* scale by 2^i */
return cvt.f;
}
/* ACL-style exp: exp(x) = 2^k * exp(r) with k = trunc(x/ln2) and
 * r = x - k*ln2. exp(r) is a degree-7 Taylor polynomial; the 2^k factor
 * is applied by adding k to the float's exponent bits. */
static float acl_exp(float x)
{
    volatile union
    {
        float f;
        unsigned int i;
    } cvt;
    float kf = x * 1.4426950408f;              /* x / ln(2) */
    float r = x - (( int )kf) * 0.6931471805f; /* reduced argument */
    int k = ( int )kf;
    cvt.f =
    1 + r * 1.00000011921f + (0.0416598916054f + r * 0.00833693705499f) * r * r +
    ((0.500000596046f + r * 0.166665703058f) + (0.0014122662833f + r * 0.000195780929062f) * r * r) * r * r * r * r;
    cvt.i += (k << 23); /* scale by 2^k */
    return cvt.f;
}
/* exp(x) ~= (1 + x/1024)^1024, i.e. the compound-interest limit with
 * n = 2^10, computed by 10 successive squarings. */
static float exp10_f32(float x)
{
    x = 1.0 + x * 0.0009765625f; /* 1 + x/1024 (left in double, as before) */
    for (int s = 0; s < 10; s++)
        x *= x;
    return x;
}
static struct tab exp_tab;
/* Fill exp_tab with the Taylor coefficients of exp() about 0 (the same
constants used by the scalar acl_exp above), broadcast into NEON vectors.
The a0..a7 slots are paired by vtaylor_polyq_f32; per-line comments give
the power of x each coefficient multiplies. Must run before vexpq_f32()
is used; sigmoid_run() calls it on entry. */
static void init_tab(void)
{
exp_tab.a0 = vdupq_n_f32(1.f); /* x^0 */
exp_tab.a1 = vdupq_n_f32(0.0416598916054f); /* x^4 (~1/24) */
exp_tab.a2 = vdupq_n_f32(0.500000596046f); /* x^2 (~1/2) */
exp_tab.a3 = vdupq_n_f32(0.0014122662833f); /* x^6 (~1/720) */
exp_tab.a4 = vdupq_n_f32(1.00000011921f); /* x^1 */
exp_tab.a5 = vdupq_n_f32(0.00833693705499f); /* x^5 (~1/120) */
exp_tab.a6 = vdupq_n_f32(0.166665703058f); /* x^3 (~1/6) */
exp_tab.a7 = vdupq_n_f32(0.000195780929062f); /* x^7 (~1/5040) */
}
/* Evaluate the degree-7 polynomial with coefficients in *coeffs using
paired fused multiply-adds:
res = (a0 + a4*x) + (a2 + a6*x)*x^2 + ((a1 + a5*x) + (a3 + a7*x)*x^2)*x^4
which groups terms so the four inner FMAs are independent. */
static inline float32x4_t vtaylor_polyq_f32(float32x4_t x, struct tab* coeffs)
{
float32x4_t A = vmlaq_f32(coeffs->a0, coeffs->a4, x); /* a0 + a4*x */
float32x4_t B = vmlaq_f32(coeffs->a2, coeffs->a6, x); /* a2 + a6*x */
float32x4_t C = vmlaq_f32(coeffs->a1, coeffs->a5, x); /* a1 + a5*x */
float32x4_t D = vmlaq_f32(coeffs->a3, coeffs->a7, x); /* a3 + a7*x */
float32x4_t x2 = vmulq_f32(x, x);
float32x4_t x4 = vmulq_f32(x2, x2);
float32x4_t res = vmlaq_f32(vmlaq_f32(A, B, x2), vmlaq_f32(C, D, x2), x4);
return res;
}
/* ACL exp function impelement */
/* ACL exp function impelement: exp(x) = 2^m * exp(val) with
m = trunc(x/ln2) (vcvtq truncates toward zero) and val = x - m*ln2.
Requires exp_tab to be initialized (init_tab) before first use. */
static inline float32x4_t vexpq_f32(float32x4_t x)
{
const float32x4_t CONST_LN2 = vdupq_n_f32(0.6931471805f); // ln(2)
const float32x4_t CONST_INV_LN2 = vdupq_n_f32(1.4426950408f); // 1/ln(2)
const float32x4_t CONST_0 = vdupq_n_f32(0.f);
const int32x4_t CONST_NEGATIVE_126 = vdupq_n_s32(-126);
// Perform range reduction [-log(2),log(2)]
int32x4_t m = vcvtq_s32_f32(vmulq_f32(x, CONST_INV_LN2));
float32x4_t val = vmlsq_f32(x, vcvtq_f32_s32(m), CONST_LN2); // x - m*ln2
// Polynomial Approximation
float32x4_t poly = vtaylor_polyq_f32(val, &exp_tab);
// Reconstruct: add m to the exponent bits (saturating), i.e. poly *= 2^m
poly = vreinterpretq_f32_s32(vqaddq_s32(vreinterpretq_s32_f32(poly), vqshlq_n_s32(m, 23)));
// Flush to zero on exponent underflow (m < -126)
poly = vbslq_f32(vcltq_s32(m, CONST_NEGATIVE_126), CONST_0, poly);
return poly;
}
/*
exp(x) = lim(1+x/n)^n // n=10
*/
/* Vector exp(x) ~= (1 + x/1024)^1024: seed with 1 + x*2^-10 via a fused
 * multiply-add, then square ten times. */
static inline float32x4_t vexpq10_f32(float32x4_t x)
{
    x = vmlaq_n_f32(vdupq_n_f32(1.0f), x, 0.0009765625f); /* 1 + x/1024 */
    for (int s = 0; s < 10; s++)
        x = vmulq_f32(x, x);
    return x;
}
/* Sigmoid activation: output[i] = 1 / (1 + exp(-input[i])), element-wise.
 * Inputs are clamped to [-30, 30] before exponentiation. Channels are
 * processed in parallel (one channel per OpenMP iteration); within a
 * channel, a NEON path handles groups of 4 lanes and a scalar loop covers
 * the remainder. Returns 0 on success.
 * Fix: the old scalar tail wrote the clamped value back into the input
 * tensor (pinput[i] = ...), mutating the caller's data — the vector path
 * never did; both paths now leave the input untouched. */
int sigmoid_run(struct tensor* output_tensor, struct tensor* input_tensor, int num_thread)
{
    init_tab();

    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;

    float32x4_t min = vdupq_n_f32(-30.0f);
    float32x4_t max = vdupq_n_f32(30.0f);
    float32x4_t one = vdupq_n_f32(1);

    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

#pragma omp parallel for num_threads(num_thread)
    for (int j = 0; j < chan_num; j++)
    {
        float* pinput = input + j * chan_size;
        float* poutput = output + j * chan_size;

        /* vector path: 4 elements at a time over the aligned prefix */
        for (int i = 0; i < (chan_size & -4); i += 4)
        {
            float32x4_t _input = vld1q_f32(pinput + i);
            _input = vmaxq_f32(_input, min);
            _input = vminq_f32(_input, max);
            float32x4_t denom = vaddq_f32(one, vexpq10_f32(vmulq_n_f32(_input, -1.0f)));
            /* reciprocal estimate + two Newton-Raphson refinement steps */
            float32x4_t out = vrecpeq_f32(denom);
            out = vmulq_f32(vrecpsq_f32(denom, out), out);
            out = vmulq_f32(vrecpsq_f32(denom, out), out);
            vst1q_f32(poutput + i, out);
        }
        /* scalar tail: clamp a local copy, never the input buffer */
        for (int i = chan_size & ~3; i < chan_size; i++)
        {
            float v = SIGMOID_MIN(pinput[i], 30.0f);
            v = SIGMOID_MAX(v, -30.0f);
            poutput[i] = 1 / (1 + exp10_f32(-v));
        }
    }

    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.